diff --git "a/val.json" "b/val.json" deleted file mode 100644--- "a/val.json" +++ /dev/null @@ -1,29828 +0,0 @@ -[ - { - "library": "tensorflow", - "name": "assign", - "source_code": "def assign(self, value, use_locking = False, name = None, read_value = True): raise NotImplementedError", - "docstring": "Assigns a new value to the variable. This is essentially a shortcut for . Args: value: A . The new value for this variable. use_locking: If , use locking during the assignment. name: The name of the operation to be created read_value: if True, will return something which evaluates to the new value of the variable; if False will return the assign op. Returns: The updated variable. If is false, instead returns None in Eager mode and the assign op in graph mode.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", - "ast_data": "FunctionDef name:assign arguments arg:self arg:value arg:use_locking arg:name arg:read_value Raise raises:NotImplementedError" - }, - { - "library": "flexx", - "name": "copy_module", - "source_code": "def copy_module(module, app_dir): if isinstance(module, str): module = importlib.import_module(module) filename = module.__file__ if filename.endswith('__init__.py'): copydir(os.path.dirname(filename), os.path.join(app_dir, 'source', module.__name__)) elif filename.endswith('.py'): shutil.copy(filename, os.path.join(app_dir, 'source', module.__name__ + '.py'))", - "docstring": "Copy the source of the given module to the given application directory.", - "type": "function", - "file_path": "flexx\\flexx\\util\\freeze.py", - "ast_data": "FunctionDef name:copy_module arguments arg:module arg:app_dir If Call call:isinstance Assign Call call:import_module Assign If Call call:endswith If Call call:endswith" - }, - { - "library": "scikit-learn", - "name": "get_depth", - "source_code": "def get_depth(self): check_is_fitted(self) return self.tree_.max_depth", - "docstring": "Return the depth of the decision tree. The depth of a tree is the maximum distance between the root and any leaf. Returns ------- self.tree_.max_depth : int The maximum depth of the tree.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", - "ast_data": "FunctionDef name:get_depth arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "flatten", - "source_code": "def flatten(self): yield self for expr in self.get_source_expressions(): if expr: if hasattr(expr, 'flatten'): yield from expr.flatten() else: yield expr", - "docstring": "Recursively yield this expression and all subexpressions, in depth-first order.", - "type": "method", - "file_path": "django\\django\\db\\models\\expressions.py", - "ast_data": "FunctionDef name:flatten arguments arg:self For Call call:get_source_expressions If If Call call:hasattr" - }, - { - "library": "kornia", - "name": "build_laplacian_pyramid", - "source_code": "def build_laplacian_pyramid(input: Tensor, max_level: int, border_type: str = 'reflect', align_corners: bool = False) -> list[Tensor]: KORNIA_CHECK_SHAPE(input, ['B', 'C', 'H', 'W']) KORNIA_CHECK(isinstance(max_level, int) or max_level < 0, f'Invalid max_level, it must be a positive integer. 
Got: {max_level}') h = input.size()[2] w = input.size()[3] require_padding = not (is_powerof_two(w) or is_powerof_two(h)) if require_padding: padding = (0, find_next_powerof_two(w) - w, 0, find_next_powerof_two(h) - h) input = pad(input, padding, 'reflect') gaussian_pyramid: list[Tensor] = build_pyramid(input, max_level, border_type, align_corners) laplacian_pyramid: list[Tensor] = [] for i in range(max_level - 1): img_expand: Tensor = pyrup(gaussian_pyramid[i + 1], border_type, align_corners) laplacian: Tensor = gaussian_pyramid[i] - img_expand laplacian_pyramid.append(laplacian) laplacian_pyramid.append(gaussian_pyramid[-1]) return laplacian_pyramid", - "docstring": "Construct the Laplacian pyramid for a tensor image. The function constructs a vector of images and builds the Laplacian pyramid by recursively computing the difference after applying pyrUp to the adjacent layer in its Gaussian pyramid. See :cite: for more details. Args: input : the tensor to be used to construct the pyramid with shape :math:. max_level: 0-based index of the last (the smallest) pyramid layer. It must be non-negative. border_type: the padding mode to be applied before convolving. The expected modes are: `[(B, C, H, W), (B, C, H/2, W/2), ...]`", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py", - "ast_data": "FunctionDef name:build_laplacian_pyramid arguments arg:input type:Tensor arg:max_level type:int arg:border_type type:str arg:align_corners type:bool Assign Assign Assign If Assign Assign Call call:pad For Call call:range Return return:yes" - }, - { - "library": "django", - "name": "create_cursor", - "source_code": "def create_cursor(self, name = None): raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')", - "docstring": "Create a cursor. Assume that a connection is established.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:create_cursor arguments arg:self arg:name Raise raises:NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')" - }, - { - "library": "tensorflow", - "name": "close", - "source_code": "def close(self): if not self._closed: self.flush() self._session.run(self._close_op) self._closed = True", - "docstring": "Flushes the event file to disk and close the file. Call this method when you do not need the summary writer anymore.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py", - "ast_data": "FunctionDef name:close arguments arg:self If Assign" - }, - { - "library": "django", - "name": "save_form", - "source_code": "def save_form(self, request, form, change): return form.save(commit = False)", - "docstring": "Given a ModelForm return an unsaved instance. 
`` is True if the object is being changed, and False if it's being added.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\options.py", - "ast_data": "FunctionDef name:save_form arguments arg:self arg:request arg:form arg:change Return return:yes" - }, - { - "library": "tensorflow", - "name": "rich_text_lines_from_rich_line_list", - "source_code": "def rich_text_lines_from_rich_line_list(rich_text_list, annotations = None): lines = [] font_attr_segs = {} for i, rl in enumerate(rich_text_list): if isinstance(rl, RichLine): lines.append(rl.text) if rl.font_attr_segs: font_attr_segs[i] = rl.font_attr_segs else: lines.append(rl) return RichTextLines(lines, font_attr_segs, annotations = annotations)", - "docstring": "Convert a list of RichLine objects or strings to a RichTextLines object. Args: rich_text_list: a list of RichLine objects or strings annotations: annotations for the resultant RichTextLines object. Returns: A corresponding RichTextLines object.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:rich_text_lines_from_rich_line_list arguments arg:rich_text_list arg:annotations Assign Assign For Call call:enumerate If Call call:isinstance If Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "reset_code_caches", - "source_code": "def reset_code_caches() -> None: import logging log = logging.getLogger(__name__) log.info('torch._dynamo.reset_code_caches') 'Clear compile caches that are keyed by code objects' with convert_frame.compile_lock: reset_code_state() for weak_code in convert_frame.input_codes.seen + convert_frame.output_codes.seen: code = weak_code() if code: reset_code(code) code_context.clear()", - "docstring": "Clears in-memory code cache, which is what stores compiled products. This resets less state than :func: and is mostly only used for testing purposes.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\__init__.py", - "ast_data": "FunctionDef name:reset_code_caches arguments Assign Call call:getLogger With For Assign Call call:weak_code If" - }, - { - "library": "tensorflow", - "name": "get_generating_ops", - "source_code": "def get_generating_ops(ts): ts = make_list_of_t(ts, allow_graph = False) return [t.op for t in ts]", - "docstring": "Return all the generating ops of the tensors in . Args: ts: a list of Returns: A list of all the generating of the tensors in . Raises: TypeError: if cannot be converted to a list of .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\op_selector.py", - "ast_data": "FunctionDef name:get_generating_ops arguments arg:ts Assign Call call:make_list_of_t Return return:yes" - }, - { - "library": "scikit-learn", - "name": "hyperparameters", - "source_code": "@property def hyperparameters(self): r = [getattr(self, attr) for attr in dir(self) if attr.startswith('hyperparameter_')] return r", - "docstring": "Returns a list of all hyperparameter specifications.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", - "ast_data": "FunctionDef name:hyperparameters arguments arg:self Assign Return return:yes" - }, - { - "library": "mongo", - "name": "from_bid", - "source_code": "@classmethod def from_bid(cls: Type[Decimal128], value: bytes) -> Decimal128: if not isinstance(value, bytes): raise TypeError(f'value must be an instance of bytes, not {type(value)}') if len(value) ! 
= 16: raise ValueError('value must be exactly 16 bytes') return cls((_UNPACK_64(value[8:])[0], _UNPACK_64(value[: 8])[0]))", - "docstring": "Create an instance of :class: from Binary Integer Decimal string. :param value: 16 byte string (128-bit IEEE 754-2008 decimal floating point in Binary Integer Decimal (BID) format).", - "type": "method", - "file_path": "mongo\\bson\\decimal128.py", - "ast_data": "FunctionDef name:from_bid arguments arg:cls type:Type[Decimal128] arg:value type:bytes If Raise raises:TypeError(f'value must be an instance of bytes, not {type(value)}') If Compare op:NotEq Raise raises:ValueError('value must be exactly 16 bytes') Return return:yes" - }, - { - "library": "scipy", - "name": "__repr__", - "source_code": "def __repr__(self): return f'{self.__class__.__name__}(\\n{repr(self.zeros)}, \\n{repr(self.poles)}, \\n{repr(self.gain)}, \\ndt: {repr(self.dt)}\\n)'", - "docstring": "Return representation of the system.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "execute", - "source_code": "def execute(self, fig): info = self._params renderer = fig._get_renderer() with getattr(renderer, '_draw_disabled', nullcontext)(): kwargs = get_tight_layout_figure(fig, fig.axes, get_subplotspec_list(fig.axes), renderer, pad = info['pad'], h_pad = info['h_pad'], w_pad = info['w_pad'], rect = info['rect']) if kwargs: fig.subplots_adjust(**kwargs)", - "docstring": "Execute tight_layout. This decides the subplot parameters given the padding that will allow the Axes labels to not be covered by other labels and Axes. Parameters ---------- fig : to perform layout on. See Also -------- .figure.Figure.tight_layout .pyplot.tight_layout", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", - "ast_data": "FunctionDef name:execute arguments arg:self arg:fig Assign Assign Call call:_get_renderer With Assign Call call:get_tight_layout_figure If" - }, - { - "library": "mongo", - "name": "publish_connection_checked_in", - "source_code": "def publish_connection_checked_in(self, address: _Address, connection_id: int) -> None: event = ConnectionCheckedInEvent(address, connection_id) for subscriber in self.__cmap_listeners: try: subscriber.connection_checked_in(event) except Exception: _handle_exception()", - "docstring": "Publish a :class: to all connection listeners.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:publish_connection_checked_in arguments arg:self arg:address type:_Address arg:connection_id type:int Assign Call call:ConnectionCheckedInEvent For Try ExceptHandler" - }, - { - "library": "mongo", - "name": "hedge", - "source_code": "@property def hedge(self) -> Optional[_Hedge]: if self.__hedge is not None: warnings.warn(\"The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0.\", DeprecationWarning, stacklevel = 2) return self.__hedge", - "docstring": "**DEPRECATED** - The read preference 'hedge' option is deprecated in PyMongo 4.12+ because hedged reads are deprecated in MongoDB version 8.0+. Support for 'hedge' will be removed in PyMongo 5.0. The read preference ``:: >>> Nearest(hedge={'enabled': False}) .. 
versionadded:: 3.11", - "type": "method", - "file_path": "mongo\\pymongo\\read_preferences.py", - "ast_data": "FunctionDef name:hedge arguments arg:self If Compare op:IsNot Return return:yes" - }, - { - "library": "pytorch", - "name": "__create_chunk_list__", - "source_code": "def __create_chunk_list__(self) -> list[object]: raise NotImplementedError('_Checkpointable._create_chunk_list is not implemented')", - "docstring": "Return a list of based on object's contents.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_checkpointable.py", - "ast_data": "FunctionDef name:__create_chunk_list__ arguments arg:self Raise raises:NotImplementedError('_Checkpointable._create_chunk_list is not implemented')" - }, - { - "library": "matplotlib", - "name": "get_navigate_mode", - "source_code": "def get_navigate_mode(self): return self._navigate_mode", - "docstring": "Get the navigation toolbar button status: 'PAN', 'ZOOM', or None.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:get_navigate_mode arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "is_scalar_indexer", - "source_code": "def is_scalar_indexer(indexer, ndim: int) -> bool: if ndim = = 1 and is_integer(indexer): return True if isinstance(indexer, tuple) and len(indexer) = = ndim: return all((is_integer(x) for x in indexer)) return False", - "docstring": "Return True if we are all scalar indexers. Parameters ---------- indexer : object ndim : int Number of dimensions in the object being indexed. Returns ------- bool", - "type": "function", - "file_path": "pandas\\pandas\\core\\indexers\\utils.py", - "ast_data": "FunctionDef name:is_scalar_indexer arguments arg:indexer arg:ndim type:int If BoolOp Compare op:Eq Call call:is_integer Return return:yes If BoolOp Call call:isinstance Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "def fit(self, raw_documents, y = None): self.fit_transform(raw_documents) return self", - "docstring": "Learn a vocabulary dictionary of all tokens in the raw documents. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. y : None This parameter is ignored. Returns ------- self : object Fitted vectorizer.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:raw_documents arg:y Return return:yes" - }, - { - "library": "numpy", - "name": "__generator_ctor", - "source_code": "def __generator_ctor(bit_generator_name = 'MT19937', bit_generator_ctor = __bit_generator_ctor): if isinstance(bit_generator_name, BitGenerator): return Generator(bit_generator_name) return Generator(bit_generator_ctor(bit_generator_name))", - "docstring": "Pickling helper function that returns a Generator object Parameters ---------- bit_generator_name : str or BitGenerator String containing the core BitGenerator's name or a BitGenerator instance bit_generator_ctor : callable, optional Callable function that takes bit_generator_name as its only argument and returns an instantized bit generator. 
Returns ------- rg : Generator Generator using the named core BitGenerator", - "type": "function", - "file_path": "numpy\\numpy\\random\\_pickle.py", - "ast_data": "FunctionDef name:__generator_ctor arguments arg:bit_generator_name arg:bit_generator_ctor If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "sphinx", - "name": "physical_lines_for_line", - "source_code": "def physical_lines_for_line(self, line: list[Cell]) -> int: physical_lines = 1 for cell in line: physical_lines = max(physical_lines, len(cell.wrapped)) return physical_lines", - "docstring": "For a given line, compute the number of physical lines it spans due to text wrapping.", - "type": "method", - "file_path": "sphinx\\sphinx\\writers\\text.py", - "ast_data": "FunctionDef name:physical_lines_for_line arguments arg:self arg:line type:list[Cell] Assign For Assign Call call:max Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, loc): super().__init__(_api.check_getitem({'bottom': 0, 'top': 0, 'left': 1, 'right': 1}, loc = loc)) self._loc = loc self._pos = {'bottom': 0, 'top': 1, 'left': 0, 'right': 1}[loc] self._path = Path(self._to_xy((0, 1), const = self._pos))", - "docstring": "``: y-axis.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axislines.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:loc Assign Assign Assign Call call:Path" - }, - { - "library": "pandas", - "name": "__from_arrow__", - "source_code": "def __from_arrow__(self, array: pyarrow.Array | pyarrow.ChunkedArray) -> BaseStringArray: if self.storage = = 'pyarrow': if self._na_value is libmissing.NA: from pandas.core.arrays.string_arrow import ArrowStringArray return ArrowStringArray(array) else: from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics return ArrowStringArrayNumpySemantics(array) else: import pyarrow if isinstance(array, pyarrow.Array): chunks = [array] else: chunks = array.chunks results = [] for arr in chunks: arr = arr.to_numpy(zero_copy_only = False) arr = ensure_string_array(arr, na_value = self.na_value) results.append(arr) if len(chunks) = = 0: arr = np.array([], dtype = object) else: arr = np.concatenate(results) new_string_array = StringArray.__new__(StringArray) NDArrayBacked.__init__(new_string_array, arr, self) return new_string_array", - "docstring": "Construct StringArray from pyarrow Array/ChunkedArray.", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\string_.py", - "ast_data": "FunctionDef name:__from_arrow__ arguments arg:self arg:array type:pyarrow.Array | pyarrow.ChunkedArray If Compare op:Eq If Compare op:Is Return return:yes Return return:yes If Call call:isinstance Assign Assign Assign For Assign Call call:to_numpy Assign Call call:ensure_string_array If Compare op:Eq Assign Call call:array Assign Call call:concatenate Assign Call call:__new__ Return return:yes" - }, - { - "library": "tensorflow", - "name": "next_sample", - "source_code": "def next_sample(uid): return next(_SHARED_SEQUENCES[uid])", - "docstring": "Gets the next value from the generator . To allow multiple generators to be used at the same time, we use to get a specific one. A single generator would cause the validation to overwrite the training generator. 
Args: uid: int, generator identifier Returns: The next value of generator .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", - "ast_data": "FunctionDef name:next_sample arguments arg:uid Return return:yes" - }, - { - "library": "tensorflow", - "name": "to_json", - "source_code": "def to_json(self, **kwargs): model_config = self._updated_config() return json.dumps(model_config, default = json_utils.get_json_type, **kwargs)", - "docstring": "Returns a JSON string containing the network configuration. To load a network from a JSON save file, use . Args: **kwargs: Additional keyword arguments to be passed to . Returns: A JSON string.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", - "ast_data": "FunctionDef name:to_json arguments arg:self kwarg:kwargs Assign Call call:_updated_config Return return:yes" - }, - { - "library": "matplotlib", - "name": "text", - "source_code": "@_docstring.interpd def text(self, x, y, s, fontdict = None, **kwargs): effective_kwargs = {'transform': self.transSubfigure, **(fontdict if fontdict is not None else {}), **kwargs} text = Text(x = x, y = y, text = s, **effective_kwargs) text.set_figure(self) text.stale_callback = _stale_figure_callback self.texts.append(text) text._remove_method = self.texts.remove self.stale = True return text", - "docstring": "Add text to figure. Parameters ---------- x, y : float The position to place the text. By default, this is in figure coordinates, floats in [0, 1]. The coordinate system can be changed using the *transform* keyword. s : str The text string. fontdict : dict, optional A dictionary to override the default text properties. If not given, the defaults are determined by :rc:. Properties passed as *kwargs* override the corresponding ones given in *fontdict*. Returns ------- Other Parameters ---------------- **kwargs : properties Other miscellaneous text parameters. %(Text:kwdoc)s See Also -------- .Axes.text .pyplot.text", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:text arguments arg:self arg:x arg:y arg:s arg:fontdict kwarg:kwargs Assign Assign Call call:Text Assign Assign Assign Return return:yes" - }, - { - "library": "uvicorn", - "name": "process_subprotocol", - "source_code": "def process_subprotocol(self, headers: Headers, available_subprotocols: Sequence[Subprotocol] | None) -> Subprotocol | None: return self.accepted_subprotocol", - "docstring": "We override the standard 'process_subprotocol' behavior here so that we return whatever subprotocol is sent in the 'accept' message.", - "type": "method", - "file_path": "uvicorn\\uvicorn\\protocols\\websockets\\websockets_impl.py", - "ast_data": "FunctionDef name:process_subprotocol arguments arg:self arg:headers type:Headers arg:available_subprotocols type:Sequence[Subprotocol] | None Return return:yes" - }, - { - "library": "tensorflow", - "name": "embedding_layouts", - "source_code": "@property def embedding_layouts(self) -> Dict[str, sparse_core_layout_pb2.SparseCoreTableLayout]: return self._s.table_to_layout", - "docstring": "Returns how the tables are laid out in the variables. The SparseCoreTableLayout describes how a table is stored in its internal state. 
You need this only if you need to pull apart the internal state.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", - "ast_data": "FunctionDef name:embedding_layouts arguments arg:self Return return:yes" - }, - { - "library": "scrapy", - "name": "has_pending_requests", - "source_code": "@abstractmethod def has_pending_requests(self) -> bool: raise NotImplementedError", - "docstring": "`` otherwise", - "type": "method", - "file_path": "scrapy\\scrapy\\core\\scheduler.py", - "ast_data": "FunctionDef name:has_pending_requests arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "scikit-learn", - "name": "get_xp", - "source_code": "def get_xp(xp: ModuleType) -> Callable[[Callable[..., _T]], Callable[..., _T]]: def inner(f: Callable[..., _T], /) -> Callable[..., _T]: @wraps(f) def wrapped_f(*args: object, **kwargs: object) -> object: return f(*args, xp = xp, **kwargs) sig = signature(f) new_sig = sig.replace(parameters = [par for i, par in sig.parameters.items() if i ! = 'xp']) if wrapped_f.__doc__ is None: wrapped_f.__doc__ = f'Array API compatibility wrapper for {f.__name__}.\\n\\nSee the corresponding documentation in NumPy/CuPy and/or the array API\\nspecification for more details.\\n\\n' wrapped_f.__signature__ = new_sig return wrapped_f return inner", - "docstring": "Decorator to automatically replace xp with the corresponding array module. Use like import numpy as np @get_xp(np) def func(x, /, xp, kwarg=None): return xp.func(x, kwarg=kwarg) Note that xp must be a keyword argument and come after all non-keyword arguments.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\_internal.py", - "ast_data": "FunctionDef name:get_xp arguments arg:xp type:ModuleType FunctionDef name:inner arguments FunctionDef name:wrapped_f arguments vararg:args kwarg:kwargs Call call:wraps Return return:yes Assign Call call:signature Assign Call call:replace If Compare op:Is Assign Assign Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "reshard", - "source_code": "def reshard(self) -> None: state = self._get_fsdp_state() if (fsdp_param_group: = state._fsdp_param_group): fsdp_param_group.reshard()", - "docstring": "Reshards the module's parameters, freeing the unsharded parameters if they are allocated and registering the sharded parameters to the module. This method is *not* recursive.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", - "ast_data": "FunctionDef name:reshard arguments arg:self Assign Call call:_get_fsdp_state If" - }, - { - "library": "pytorch", - "name": "sym_max", - "source_code": "def sym_max(a, b): if overrides.has_torch_function((a, b)): return overrides.handle_torch_function(sym_max, (a, b), a, b) if isinstance(a, (SymInt, SymFloat)): return a.__sym_max__(b) elif isinstance(b, (SymInt, SymFloat)): return b.__sym_max__(a) all_types, float_types = __all_and_float_types() assert isinstance(a, all_types), type(a) assert isinstance(b, all_types), type(b) if isinstance(a, float_types) or isinstance(b, float_types): return builtins.float(builtins.max(a, b)) else: return builtins.max(a, b)", - "docstring": "SymInt-aware utility for max which avoids branching on a < b. 
Unlike builtins.max(), this only works for int/float, and it always promotes to float if any argument is float (unlike builtins.max, which will faithfully preserve the type of the input argument).", - "type": "function", - "file_path": "pytorch\\torch\\__init__.py", - "ast_data": "FunctionDef name:sym_max arguments arg:a arg:b If Call call:has_torch_function Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Assign Call call:__all_and_float_types If BoolOp Call call:isinstance Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "Hidden", - "source_code": "class Hidden: def __init__(self, constraint): self.constraint = constraint", - "docstring": "Class encapsulating a constraint not meant to be exposed to the user. Parameters ---------- constraint : str or _Constraint instance The constraint to be used internally.", - "type": "class", - "file_path": "scikit-learn\\sklearn\\utils\\_param_validation.py", - "ast_data": "ClassDef name:Hidden FunctionDef name:__init__ arguments arg:self arg:constraint Assign" - }, - { - "library": "pytorch", - "name": "pop", - "source_code": "def pop(): nonlocal nexti if key_stack: key = key_stack.pop() if nexti < = key[1]: exn_tab.append(ExceptionTableEntry(max(key[0], nexti), key[1], *exn_dict[key])) nexti = key[1] + 2", - "docstring": "Pop the key_stack and append an exception table entry if possible.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", - "ast_data": "FunctionDef name:pop arguments If Assign Call call:pop If Compare op:LtE Assign" - }, - { - "library": "pytorch", - "name": "init_cellvars", - "source_code": "def init_cellvars(parent, result: dict[str, VariableTracker], code): side_effects = parent.output.side_effects for name in code.co_cellvars: new_cell = side_effects.track_cell_new() if name in result: side_effects.store_cell(new_cell, result.pop(name)) result[name] = new_cell", - "docstring": "Update to add mapping from local name to new cells created directly by , or update SideEffects in if the a local cell is already in (cell argument).", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py", - "ast_data": "FunctionDef name:init_cellvars arguments arg:parent arg:result type:dict[str, VariableTracker] arg:code Assign For Assign Call call:track_cell_new If Compare op:In Assign" - }, - { - "library": "django", - "name": "ListOptionAction", - "source_code": "class ListOptionAction(argparse.Action): def __call__(self, parser, namespace, value, option_string = None): if value.lower() = = 'true': setattr(namespace, self.dest, True) else: setattr(namespace, self.dest, value.split(', '))", - "docstring": "Custom argparse action for keywords that require a string list. If the string is 'True'/'true' then the option value will be a boolean instead.", - "type": "class", - "file_path": "django\\django\\contrib\\gis\\management\\commands\\ogrinspect.py", - "ast_data": "ClassDef name:ListOptionAction FunctionDef name:__call__ arguments arg:self arg:parser arg:namespace arg:value arg:option_string If Compare op:Eq" - }, - { - "library": "numpy", - "name": "english_upper", - "source_code": "def english_upper(s): uppered = s.translate(UPPER_TABLE) return uppered", - "docstring": "Apply English case rules to convert ASCII strings to all upper case. 
This is an internal utility function to replace calls to str.upper() such that we can avoid changing behavior with changing locales. In particular, Turkish has distinct dotted and dotless variants of the Latin letter \"I\" in both lowercase and uppercase. Thus, \"i\".upper() != \"I\" in a \"tr\" locale. Parameters ---------- s : str Returns ------- uppered : str Examples -------- >>> from numpy._core.numerictypes import english_upper >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') 'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' >>> english_upper('') ''", - "type": "function", - "file_path": "numpy\\numpy\\_core\\_string_helpers.py", - "ast_data": "FunctionDef name:english_upper arguments arg:s Assign Call call:translate Return return:yes" - }, - { - "library": "mongo", - "name": "list_search_indexes", - "source_code": "def list_search_indexes(self, name: Optional[str] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any) -> CommandCursor[Mapping[str, Any]]: if name is None: pipeline: _Pipeline = [{'$listSearchIndexes': {}}] else: pipeline = [{'$listSearchIndexes': {'name': name}}] coll = self.with_options(codec_options = DEFAULT_CODEC_OPTIONS, read_preference = ReadPreference.PRIMARY, write_concern = DEFAULT_WRITE_CONCERN, read_concern = DEFAULT_READ_CONCERN) cmd = _CollectionAggregationCommand(coll, CommandCursor, pipeline, kwargs, explicit_session = session is not None, comment = comment, user_fields = {'cursor': {'firstBatch': 1}}) return self._database.client._retryable_read(cmd.get_cursor, cmd.get_read_preference(session), session, retryable = not cmd._performs_write, operation = _Op.LIST_SEARCH_INDEX)", - "docstring": "Return a cursor over search indexes for the current collection. :param name: If given, the name of the index to search for. Only indexes with matching index names will be returned. If not given, all search indexes for the current collection will be returned. :param session: a :class:. :param comment: A user-provided comment to attach to this command. :return: A :class: over the result set. .. note:: requires a MongoDB server version 7.0+ Atlas cluster. .. versionadded:: 4.5", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\collection.py", - "ast_data": "FunctionDef name:list_search_indexes arguments arg:self arg:name type:Optional[str] arg:session type:Optional[ClientSession] arg:comment type:Optional[Any] kwarg:kwargs If Compare op:Is Assign Assign Call call:with_options Assign Call call:_CollectionAggregationCommand Return return:yes" - }, - { - "library": "cherrypy", - "name": "update", - "source_code": "def update(self, d): if not self.loaded: self.load() self._data.update(d)", - "docstring": "Update multiple session-stored objects in one go. D.update(E) -> None. 
Update D from E: for k in E: D[k] = E[k].", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", - "ast_data": "FunctionDef name:update arguments arg:self arg:d If" - }, - { - "library": "tensorflow", - "name": "AbstractCheckpointAdapter", - "source_code": "class AbstractCheckpointAdapter(abc.ABC): @classmethod @abc.abstractmethod def create_from_checkpoint(cls, path: str): pass @abc.abstractmethod def is_applicable(self, trackable: base.Trackable) -> bool: pass @abc.abstractmethod def get_reshard_callback(self, name: str) -> Optional[ReshardCallback]: pass def maybe_reshard(self, name: str) -> tuple[str, Optional[ReshardCallback]]: callback = self.get_reshard_callback(name) if callback is None: return (name, None) if callback.object_name(): return (callback.object_name(), callback) return (name, callback)", - "docstring": "Abstract API for checkpoint adapter. This is an experimental API that specifies how checkpoint restore should be adapted for specific trackable objects.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_adapter.py", - "ast_data": "ClassDef name:AbstractCheckpointAdapter FunctionDef name:create_from_checkpoint arguments arg:cls arg:path type:str FunctionDef name:is_applicable arguments arg:self arg:trackable type:base.Trackable FunctionDef name:get_reshard_callback arguments arg:self arg:name type:str FunctionDef name:maybe_reshard arguments arg:self arg:name type:str Assign Call call:get_reshard_callback If Compare op:Is Return return:yes If Call call:object_name Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "categorical_column_to_series", - "source_code": "def categorical_column_to_series(col: Column) -> tuple[pd.Series, Any]: categorical = col.describe_categorical if not categorical['is_dictionary']: raise NotImplementedError('Non-dictionary categoricals not supported yet') cat_column = categorical['categories'] if hasattr(cat_column, '_col'): categories = np.array(cat_column._col) else: raise NotImplementedError(\"Interchanging categorical columns isn't supported yet, and our fallback of using the `col._col` attribute (a ndarray) failed.\") buffers = col.get_buffers() codes_buff, codes_dtype = buffers['data'] codes = buffer_to_ndarray(codes_buff, codes_dtype, offset = col.offset, length = col.size()) if len(categories) > 0: values = categories[codes % len(categories)] else: values = codes cat = pd.Categorical(values, categories = categories, ordered = categorical['is_ordered']) data = pd.Series(cat) data = set_nulls(data, col, buffers['validity']) return (data, buffers)", - "docstring": "Convert a column holding categorical data to a pandas Series. 
Parameters ---------- col : Column Returns ------- tuple Tuple of pd.Series holding the data and the memory owner object that keeps the memory alive.", - "type": "function", - "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py", - "ast_data": "FunctionDef name:categorical_column_to_series arguments arg:col type:Column Assign If Raise raises:NotImplementedError('Non-dictionary categoricals not supported yet') Assign If Call call:hasattr Assign Call call:array Raise raises:NotImplementedError(\"Interchanging categorical columns isn't supported yet, and our fallback of using the `col._col` attribute (a ndarray) failed.\") Assign Call call:get_buffers Assign Assign Call call:buffer_to_ndarray If Compare op:Gt Assign Assign Assign Call call:Categorical Assign Call call:Series Assign Call call:set_nulls Return return:yes" - }, - { - "library": "tensorflow", - "name": "int_shape", - "source_code": "@doc_controls.do_not_generate_docs def int_shape(x): try: shape = x.shape if not isinstance(shape, tuple): shape = tuple(shape.as_list()) return shape except ValueError: return None", - "docstring": "Returns the shape of tensor or variable as a tuple of int or None entries. Args: x: Tensor or variable. Returns: A tuple of integers (or None entries). Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> tf.keras.backend.int_shape(input) (2, 4, 5) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.int_shape(kvar) (2, 2)", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:int_shape arguments arg:x Try Assign If Assign Call call:tuple Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_per_replica_batch_size", - "source_code": "def get_per_replica_batch_size(self, global_batch_size): if global_batch_size % self._num_replicas_in_sync ! = 0: raise ValueError('The `global_batch_size` %r is not divisible by `num_replicas_in_sync` %r ' % (global_batch_size, self._num_replicas_in_sync)) return global_batch_size // self._num_replicas_in_sync", - "docstring": "Returns the per-replica batch size. Args: global_batch_size: the global batch size which should be divisible by . Returns: the per-replica batch size. Raises: ValueError: if not divisible by .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", - "ast_data": "FunctionDef name:get_per_replica_batch_size arguments arg:self arg:global_batch_size If Compare op:NotEq Raise raises:ValueError('The `global_batch_size` %r is not divisible by `num_replicas_in_sync` %r ' % (global_batch_size, self._num_replicas_in_sync)) Return return:yes" - }, - { - "library": "mongo", - "name": "ConnectionCheckedInEvent", - "source_code": "class ConnectionCheckedInEvent(_ConnectionIdEvent): __slots__ = ()", - "docstring": "Published when the driver checks in a Connection into the Pool. :param address: The address (host, port) pair of the server this Connection is attempting to connect to. :param connection_id: The integer ID of the Connection in this Pool. .. 
versionadded:: 3.9", - "type": "class", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "ClassDef name:ConnectionCheckedInEvent Assign" - }, - { - "library": "pytorch", - "name": "register_state_dict_post_hook", - "source_code": "def register_state_dict_post_hook(self, hook): hook._from_public_api = True handle = RemovableHandle(self._state_dict_hooks) self._state_dict_hooks[handle.id] = hook return handle", - "docstring": "Register a post-hook for the :meth: method. It should have the following signature:: hook(module, state_dict, prefix, local_metadata) -> None The registered hooks can modify the `` inplace.", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:register_state_dict_post_hook arguments arg:self arg:hook Assign Assign Call call:RemovableHandle Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "set_feature_use", - "source_code": "def set_feature_use(feature: str, usage: bool): if get_metrics_context().in_progress(): get_metrics_context().set_key_value('feature_usage', feature, usage)", - "docstring": "Records whether we are using a feature Generally a feature is a JK.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\utils.py", - "ast_data": "FunctionDef name:set_feature_use arguments arg:feature type:str arg:usage type:bool If Call call:in_progress" - }, - { - "library": "django", - "name": "select_template", - "source_code": "def select_template(self, template_name_list): if not template_name_list: raise TemplateDoesNotExist('No template names provided') not_found = [] for template_name in template_name_list: try: return self.get_template(template_name) except TemplateDoesNotExist as exc: if exc.args[0] not in not_found: not_found.append(exc.args[0]) continue raise TemplateDoesNotExist(', '.join(not_found))", - "docstring": "Given a list of template names, return the first that can be loaded.", - "type": "method", - "file_path": "django\\django\\template\\engine.py", - "ast_data": "FunctionDef name:select_template arguments arg:self arg:template_name_list If Raise raises:TemplateDoesNotExist('No template names provided') Assign For Try Return return:yes ExceptHandler If Compare op:NotIn Raise raises:TemplateDoesNotExist(', '.join(not_found))" - }, - { - "library": "scikit-learn", - "name": "min", - "source_code": "def min(self, y: Array | complex, /, copy: bool | None = None, xp: ModuleType | None = None) -> Array: xp = array_namespace(self._x) if xp is None else xp mxp = meta_namespace(self._x, xp = xp) y = xp.asarray(y) return self._op(_AtOp.MIN, mxp.minimum, mxp.minimum, y, copy = copy, xp = xp)", - "docstring": "Apply `` and return the updated array.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py", - "ast_data": "FunctionDef name:min arguments arg:copy type:bool | None arg:xp type:ModuleType | None Assign Assign Call call:meta_namespace Assign Call call:asarray Return return:yes" - }, - { - "library": "tensorflow", - "name": "alias_tensors", - "source_code": "def alias_tensors(*args): def alias_if_tensor(a): return array_ops.identity(a) if isinstance(a, tensor.Tensor) else a if len(args) > 1: return (alias_if_tensor(a) for a in args) elif len(args) = = 1: return alias_if_tensor(args[0]) raise ValueError('at least one argument required')", - "docstring": "Wraps any Tensor arguments with an identity op. Any other argument, including Variables, is returned unchanged. Args: *args: Any arguments. 
Must contain at least one element. Returns: Same as *args, with Tensor instances replaced as described. Raises: ValueError: If args doesn't meet the requirements.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\misc.py", - "ast_data": "FunctionDef name:alias_tensors arguments vararg:args FunctionDef name:alias_if_tensor arguments arg:a Return return:yes If Compare op:Gt Return return:yes If Compare op:Eq Return return:yes Raise raises:ValueError('at least one argument required')" - }, - { - "library": "kornia", - "name": "get_perpendicular", - "source_code": "def get_perpendicular(lines: Tensor, points: Tensor) -> Tensor: KORNIA_CHECK_SHAPE(lines, ['*', 'N', '3']) KORNIA_CHECK_SHAPE(points, ['*', 'N', 'two']) if points.shape[2] = = 2: points_h: Tensor = convert_points_to_homogeneous(points) elif points.shape[2] = = 3: points_h = points else: raise AssertionError(points.shape) infinity_point = lines * torch.tensor([1, 1, 0], dtype = lines.dtype, device = lines.device).view(1, 1, 3) perp: Tensor = points_h.cross(infinity_point, dim = 2) return perp", - "docstring": "Compute the perpendicular to a line, through the point. Args: lines: tensor containing the set of lines :math:. points: tensor containing the set of points :math:. Returns: a tensor with shape :math: containing a vector of the epipolar perpendicular lines. Each line is described as :math: and encoding the vectors as :math:.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\epipolar\\fundamental.py", - "ast_data": "FunctionDef name:get_perpendicular arguments arg:lines type:Tensor arg:points type:Tensor If Compare op:Eq If Compare op:Eq Assign Raise raises:AssertionError(points.shape) Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_width_from_char_name", - "source_code": "def get_width_from_char_name(self, name): return self._metrics_by_name[name].width", - "docstring": "Get the width of the character from a type1 character name.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", - "ast_data": "FunctionDef name:get_width_from_char_name arguments arg:self arg:name Return return:yes" - }, - { - "library": "numpy", - "name": "put", - "source_code": "@array_function_dispatch(_put_dispatcher) def put(a, ind, v, mode = 'raise'): try: put = a.put except AttributeError as e: raise TypeError(f'argument 1 must be numpy.ndarray, not {type(a)}') from e return put(ind, v, mode = mode)", - "docstring": "Replaces specified elements of an array with given values. The indexing works on the flattened target array. is roughly equivalent to: :: a.flat[ind] = v Parameters ---------- a : ndarray Target array. ind : array_like Target indices, interpreted as integers. v : array_like Values to place in at target indices. If is shorter than it will be repeated as necessary. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. * 'raise' -- raise an error (default) * 'wrap' -- wrap around * 'clip' -- clip to the range 'clip' mode means that all indices that are too large are replaced by the index that addresses the last element along that axis. Note that this disables indexing with negative numbers. In 'raise' mode, if an exception occurs the target array may still be modified. 
See Also -------- putmask, place put_along_axis : Put elements by matching the array and the index arrays Examples -------- >>> import numpy as np >>> a = np.arange(5) >>> np.put(a, [0, 2], [-44, -55]) >>> a array([-44, 1, -55, 3, 4]) >>> a = np.arange(5) >>> np.put(a, 22, -5, mode='clip') >>> a array([ 0, 1, 2, 3, -5])", - "type": "function", - "file_path": "numpy\\numpy\\_core\\fromnumeric.py", - "ast_data": "FunctionDef name:put arguments arg:a arg:ind arg:v arg:mode Call call:array_function_dispatch Try Assign ExceptHandler Raise raises:TypeError(f'argument 1 must be numpy.ndarray, not {type(a)}') Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_node_index", - "source_code": "def get_node_index(layer, config_node_index): if isinstance(layer, input_layer_module.InputLayer): return 0 return node_index_map.get((layer.name, config_node_index), None)", - "docstring": "Returns node index in layer (might differ from config_node_index).", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\functional.py", - "ast_data": "FunctionDef name:get_node_index arguments arg:layer arg:config_node_index If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, saved_model_checksum: int = None, graph_def_program_hash: int = None, signature_def_hash: int = None, saved_object_graph_hash: int = None, checkpoint_hash: int = None, version: int = None): self.saved_model_checksum = saved_model_checksum self.graph_def_program_hash = graph_def_program_hash self.signature_def_hash = signature_def_hash self.saved_object_graph_hash = saved_object_graph_hash self.checkpoint_hash = checkpoint_hash self.version = version", - "docstring": "Initializes the instance based on values in the SavedModel fingerprint. Args: saved_model_checksum: Value of the. graph_def_program_hash: Value of the . signature_def_hash: Value of the . saved_object_graph_hash: Value of the . checkpoint_hash: Value of the . version: Value of the producer field of the VersionDef.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\fingerprinting.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:saved_model_checksum type:int arg:graph_def_program_hash type:int arg:signature_def_hash type:int arg:saved_object_graph_hash type:int arg:checkpoint_hash type:int arg:version type:int Assign Assign Assign Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "ZeroPad3d", - "source_code": "class ZeroPad3d(ConstantPad3d): padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t) -> None: super().__init__(padding, 0.0) def extra_repr(self) -> str: return f'{self.padding}'", - "docstring": "Pads the input tensor boundaries with zero. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 6-, uses (:math:, :math:, :math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. 
- Output: :math: or :math:, where :math: :math: :math: Examples:: >>> m = nn.ZeroPad3d(3) >>> input = torch.randn(16, 3, 10, 20, 30) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1)) >>> output = m(input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\padding.py", - "ast_data": "ClassDef name:ZeroPad3d FunctionDef name:__init__ arguments arg:self arg:padding type:_size_6_t FunctionDef name:extra_repr arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "metric_variable", - "source_code": "def metric_variable(shape, dtype, validate_shape = True, name = None): return variable_v1.VariableV1(lambda: array_ops.zeros(shape, dtype), trainable = False, collections = [ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES], validate_shape = validate_shape, synchronization = variables.VariableSynchronization.ON_READ, aggregation = variables.VariableAggregation.SUM, name = name)", - "docstring": "Create variable in collections. If running in a context, the variable will be \"sync on read\". This means: * The returned object will be a container with separate variables per replica of the model. * When writing to the variable, e.g. using in a metric update, the update will be applied to the variable local to the replica. * To get a metric's result value, we need to sum the variable values across the replicas before computing the final answer. Furthermore, the final answer should be computed once instead of in every replica. Both of these are accomplished by running the computation of the final result value inside . Inside the , ops are only added to the graph once and access to a sync on read variable in a computation returns the sum across all replicas. Args: shape: Shape of the created variable. dtype: Type of the created variable. validate_shape: (Optional) Whether shape validation is enabled for the created variable. name: (Optional) String name of the created variable. Returns: A (non-trainable) variable initialized to zero, or if inside a scope a sync on read variable container.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", - "ast_data": "FunctionDef name:metric_variable arguments arg:shape arg:dtype arg:validate_shape arg:name Return return:yes" - }, - { - "library": "pytorch", - "name": "ReplicationPad3d", - "source_code": "class ReplicationPad3d(_ReplicationPadNd): padding: tuple[int, int, int, int, int, int] def __init__(self, padding: _size_6_t) -> None: super().__init__() self.padding = _ntuple(6)(padding)", - "docstring": "Pads the input tensor using replication of the input boundary. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 6-, uses (:math:, :math:, :math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. 
- Output: :math: or :math:, where :math: :math: :math: Examples:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = nn.ReplicationPad3d(3) >>> input = torch.randn(16, 3, 8, 320, 480) >>> output = m(input) >>> # using different paddings for different sides >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1)) >>> output = m(input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\padding.py", - "ast_data": "ClassDef name:ReplicationPad3d FunctionDef name:__init__ arguments arg:self arg:padding type:_size_6_t Assign Call" - }, - { - "library": "pytorch", - "name": "schedule", - "source_code": "def schedule(snode): scheduled.append(snode) for buf_name in snode.get_buffer_names(): for snode in buffer_users[buf_name]: unmet_deps[snode].remove(buf_name) if len(unmet_deps[snode]) = = 0: heapq.heappush(ready, Runnable(snode))", - "docstring": "Schedules and put all unblocked nodes onto the ready queue.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\comms.py", - "ast_data": "FunctionDef name:schedule arguments arg:snode For Call call:get_buffer_names For If Compare op:Eq" - }, - { - "library": "pytorch", - "name": "TypeConstraintParam", - "source_code": "@dataclasses.dataclass(frozen = True) class TypeConstraintParam: name: str allowed_types: set[ir.TypeProtocol] description: str = '' def __hash__(self) -> int: return hash((self.name, tuple(self.allowed_types))) def __str__(self) -> str: allowed_types_str = ' | '.join((str(t) for t in self.allowed_types)) return f'{self.name} = {allowed_types_str}' @classmethod def any_tensor(cls, name: str, description: str = '') -> TypeConstraintParam: return cls(name, {ir.TensorType(dtype) for dtype in ir.DataType}, description) @classmethod def any_value(cls, name: str, description: str = '') -> TypeConstraintParam: return cls(name, _ALL_VALUE_TYPES, description)", - "docstring": "Type constraint for a parameter. Attributes: name: Name of the parameter. E.g. 
\"TFloat\" allowed_types: Allowed types for the parameter.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_schemas.py", - "ast_data": "ClassDef name:TypeConstraintParam Call call:dataclass FunctionDef name:__hash__ arguments arg:self Return return:yes FunctionDef name:__str__ arguments arg:self Assign Call call:join Return return:yes FunctionDef name:any_tensor arguments arg:cls arg:name type:str arg:description type:str Return return:yes FunctionDef name:any_value arguments arg:cls arg:name type:str arg:description type:str Return return:yes" - }, - { - "library": "algorithms", - "name": "dijkstra", - "source_code": "def dijkstra(self, src): dist = [float('inf')] * self.vertex_count dist[src] = 0 min_dist_set = [False] * self.vertex_count for _ in range(self.vertex_count): source = self.min_distance(dist, min_dist_set) min_dist_set[source] = True for target in range(self.vertex_count): if self.graph[source][target] < = 0 or min_dist_set[target]: continue if dist[target] > dist[source] + self.graph[source][target]: dist[target] = dist[source] + self.graph[source][target] return dist", - "docstring": "Given a node, returns the shortest distance to every other node", - "type": "method", - "file_path": "algorithms\\algorithms\\graph\\dijkstra.py", - "ast_data": "FunctionDef name:dijkstra arguments arg:self arg:src Assign Assign Assign For Call call:range Assign Call call:min_distance Assign For Call call:range If BoolOp Compare op:LtE If Compare op:Gt Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "scatter_min", - "source_code": "def scatter_min(self, sparse_delta, use_locking = False, name = None): raise NotImplementedError", - "docstring": "Updates this variable with the min of and itself. Args: sparse_delta: to use as an argument of min with this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", - "ast_data": "FunctionDef name:scatter_min arguments arg:self arg:sparse_delta arg:use_locking arg:name Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "as_dict", - "source_code": "@tf_export('experimental.extension_type.as_dict') def as_dict(value): return {field.name: getattr(value, field.name) for field in value._tf_extension_type_fields()}", - "docstring": "Extracts the attributes of and their values to a dict format. Unlike , this function is not recursive and in case of nested objects, only the top level object is converted to a dict. Args: value: An object. Returns: A dict that contains the attributes of and their values.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", - "ast_data": "FunctionDef name:as_dict arguments arg:value Call call:tf_export Return return:yes" - }, - { - "library": "pandas", - "name": "count", - "source_code": "def count(self) -> int: return notna(self._values).sum().astype('int64')", - "docstring": "Return number of non-NA/null observations in the Series. Returns ------- int Number of non-null values in the Series. See Also -------- DataFrame.count : Count non-NA cells for each column or row. 
Examples -------- >>> s = pd.Series([0.0, 1.0, np.nan]) >>> s.count() 2", - "type": "method", - "file_path": "pandas\\pandas\\core\\series.py", - "ast_data": "FunctionDef name:count arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "integer_field_range", - "source_code": "def integer_field_range(self, internal_type): return self.integer_field_ranges[internal_type]", - "docstring": "Given an integer field internal type (e.g. 'PositiveIntegerField'), return a tuple of the (min_value, max_value) form representing the range of the column type bound to the field.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:integer_field_range arguments arg:self arg:internal_type Return return:yes" - }, - { - "library": "matplotlib", - "name": "edit_margin_min", - "source_code": "def edit_margin_min(self, todo, size, cell = 0): if size > self.margin_vals[todo][cell]: self.edit_margin(todo, size, cell)", - "docstring": "Change the minimum size of the margin for one cell. Parameters ---------- todo : string (one of 'left', 'right', 'bottom', 'top') margin to alter. size : float Minimum size of the margin . If it is larger than the existing minimum it updates the margin size. Fraction of figure size. cell : int Cell column or row to edit.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py", - "ast_data": "FunctionDef name:edit_margin_min arguments arg:self arg:todo arg:size arg:cell If Compare op:Gt" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, N = 256, M = 256, shape = 'square', origin = (0, 0), name = 'bivariate colormap'): self.name = name self.N = int(N) self.M = int(M) _api.check_in_list(['square', 'circle', 'ignore', 'circleignore'], shape = shape) self._shape = shape self._rgba_bad = (0.0, 0.0, 0.0, 0.0) self._rgba_outside = (1.0, 0.0, 1.0, 1.0) self._isinit = False self.n_variates = 2 self._origin = (float(origin[0]), float(origin[1])) '#: When this colormap exists on a scalar mappable and colorbar_extend\\n #: is not False, colorbar creation will pick up ``colorbar_extend`` as\\n #: the default value for the ``extend`` keyword in the\\n #: `matplotlib.colorbar.Colorbar` constructor.\\n self.colorbar_extend = False'", - "docstring": "Parameters ---------- N : int, default: 256 The number of RGB quantization levels along the first axis. M : int, default: 256 The number of RGB quantization levels along the second axis. shape : {'square', 'circle', 'ignore', 'circleignore'} - 'square' each variate is clipped to [0,1] independently - 'circle' the variates are clipped radially to the center of the colormap, and a circular mask is applied when the colormap is displayed - 'ignore' the variates are not clipped, but instead assigned the 'outside' color - 'circleignore' a circular mask is applied, but the data is not clipped and instead assigned the 'outside' color origin : (float, float), default: (0,0) The relative origin of the colormap. Typically (0, 0), for colormaps that are linear on both axis, and (.5, .5) for circular colormaps. Used when getting 1D colormaps from 2D colormaps. 
name : str, optional The name of the colormap.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:N arg:M arg:shape arg:origin arg:name Assign Assign Call call:int Assign Call call:int Assign Assign Assign Assign Assign Assign" - }, - { - "library": "seaborn", - "name": "plot_joint", - "source_code": "def plot_joint(self, func, **kwargs): kwargs = kwargs.copy() if str(func.__module__).startswith('seaborn'): kwargs['ax'] = self.ax_joint else: plt.sca(self.ax_joint) if self.hue is not None: kwargs['hue'] = self.hue self._inject_kwargs(func, kwargs, self._hue_params) if str(func.__module__).startswith('seaborn'): func(x = self.x, y = self.y, **kwargs) else: func(self.x, self.y, **kwargs) return self", - "docstring": "Draw a bivariate plot on the joint axes of the grid. Parameters ---------- func : plotting callable If a seaborn function, it should accept `JointGrid` for easy method chaining.", - "type": "method", - "file_path": "seaborn\\seaborn\\axisgrid.py", - "ast_data": "FunctionDef name:plot_joint arguments arg:self arg:func kwarg:kwargs Assign Call call:copy If Call call:startswith Assign If Compare op:IsNot Assign If Call call:startswith Return return:yes" - }, - { - "library": "tensorflow", - "name": "write_object_proto_for_resource_variable", - "source_code": "def write_object_proto_for_resource_variable(resource_variable, proto, options, enforce_naming = True): proto.variable.SetInParent() if enforce_naming and (not resource_variable.name.endswith(': 0')): raise ValueError(f\"Cowardly refusing to save variable {resource_variable.name} because of unexpected suffix in the name (expected ': 0')which won't be restored.\") proto.variable.name = tensor_module.get_op_name(resource_variable.name) proto.variable.trainable = resource_variable.trainable proto.variable.dtype = resource_variable.dtype.as_datatype_enum proto.variable.synchronization = resource_variable.synchronization.value proto.variable.aggregation = resource_variable.aggregation.value proto.variable.shape.CopyFrom(resource_variable.shape.as_proto()) if options.experimental_variable_policy._save_variable_devices(): if hasattr(resource_variable, 'device'): proto.variable.device = resource_variable.device", - "docstring": "Writes additional information of the variable into the SavedObject proto. This allows users to define a to provide extra information of the variable to the SavedObject. For example, DistributedVariable class would fill in components in the distributed context. Args: resource_variable: A or that has the information to be saved into the proto. proto: proto to update. options: A instance that configures save behavior. 
enforce_naming: A bool determining whether to check that names end in the expected string ':0'", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:write_object_proto_for_resource_variable arguments arg:resource_variable arg:proto arg:options arg:enforce_naming If BoolOp Raise raises:ValueError(f\"Cowardly refusing to save variable {resource_variable.name} because of unexpected suffix in the name (expected ':0')which won't be restored.\") Assign Call call:get_op_name Assign Assign Assign Assign If Call call:_save_variable_devices If Call call:hasattr Assign" - }, - { - "library": "pandas", - "name": "unit", - "source_code": "@cache_readonly def unit(self) -> str: return dtype_to_unit(self.dtype)", - "docstring": "The precision unit of the datetime data. Returns the precision unit for the dtype. It means the smallest time frame that can be stored within this dtype. Returns ------- str Unit string representation (e.g. \"ns\"). See Also -------- TimelikeOps.as_unit : Converts to a specific unit. Examples -------- >>> idx = pd.DatetimeIndex([\"2020-01-02 01:02:03.004005006\"]) >>> idx.unit 'ns' >>> idx.as_unit(\"s\").unit 's'", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", - "ast_data": "FunctionDef name:unit arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "select_format", - "source_code": "def select_format(self, compiler, sql, params): if hasattr(self.output_field, 'select_format'): return self.output_field.select_format(compiler, sql, params) return (sql, params)", - "docstring": "Custom format for select clauses. For example, EXISTS expressions need to be wrapped in CASE WHEN on Oracle.", - "type": "method", - "file_path": "django\\django\\db\\models\\expressions.py", - "ast_data": "FunctionDef name:select_format arguments arg:self arg:compiler arg:sql arg:params If Call call:hasattr Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "patterns_to_regex", - "source_code": "def patterns_to_regex(allowed_patterns: list[str]) -> Any: rc = '(' for idx, pattern in enumerate(allowed_patterns): if idx > 0: rc + = '|' pattern_ = PeekableIterator(pattern) assert not any((c in pattern for c in '{}()[]\\\\')) for c in pattern_: if c = = '.': rc + = '\\\\.' elif c = = '+': rc + = '\\\\+' elif c = = '*': if pattern_.peek() = = '*': next(pattern_) rc + = '.*' else: rc + = '[^/]*' else: rc + = c rc + = ')' return re.compile(rc)", - "docstring": "pattern is glob-like, i.e. the only special sequences it has are: - ? - matches single character - * - matches any non-folder separator characters or no character - ** - matches any characters or no character Assuming that patterns are free of braces and backslashes the only character that needs to be escaped are dot and plus", - "type": "function", - "file_path": "pytorch\\.github\\scripts\\gitutils.py", - "ast_data": "FunctionDef name:patterns_to_regex arguments arg:allowed_patterns type:list[str] Assign For Call call:enumerate If Compare op:Gt Assign Call call:PeekableIterator For If Compare op:Eq If Compare op:Eq If Compare op:Eq If Compare op:Eq Return return:yes" - }, - { - "library": "kornia", - "name": "forward", - "source_code": "def forward(self, laf: torch.Tensor, img: torch.Tensor) -> torch.Tensor: return laf", - "docstring": "Run forward. 
Args: laf: :math: img: :math: Returns: LAF, unchanged :math:", - "type": "method", - "file_path": "kornia\\kornia\\feature\\orientation.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:laf type:torch.Tensor arg:img type:torch.Tensor Return return:yes" - }, - { - "library": "algorithms", - "name": "count_components", - "source_code": "def count_components(adjacency_list, size): count = 0 visited = [False] * (size + 1) for i in range(1, size + 1): if not visited[i]: dfs(i, visited, adjacency_list) count + = 1 return count", - "docstring": "Function that counts the Connected components on bases of DFS. return type : int", - "type": "function", - "file_path": "algorithms\\algorithms\\graph\\count_connected_number_of_component.py", - "ast_data": "FunctionDef name:count_components arguments arg:adjacency_list arg:size Assign Assign For Call call:range If Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict_proba", - "source_code": "@_available_if_base_estimator_has('predict_proba') def predict_proba(self, X): return self._get_predictions(X, output_method = 'predict_proba')", - "docstring": "Predict probability estimates. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input data. Returns ------- Y_prob : array-like of shape (n_samples, n_classes) The predicted probabilities.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\multioutput.py", - "ast_data": "FunctionDef name:predict_proba arguments arg:self arg:X Call call:_available_if_base_estimator_has Return return:yes" - }, - { - "library": "tensorflow", - "name": "f", - "source_code": "def f(original_nodes): del original_nodes return saved_debug_info", - "docstring": "Function to create for the given .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", - "ast_data": "FunctionDef name:f arguments arg:original_nodes Return return:yes" - }, - { - "library": "pytorch", - "name": "constexpr_next_power_of_2", - "source_code": "@triton_builtin def constexpr_next_power_of_2(n: tl.constexpr, *, _builder: object = None) -> tl.constexpr: assert isinstance(n, tl.constexpr) return tl.constexpr(triton.next_power_of_2(n.value))", - "docstring": "A version triton.next_power_of_two that can be used within a kernel on constants.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_helpers.py", - "ast_data": "FunctionDef name:constexpr_next_power_of_2 arguments arg:n type:tl.constexpr Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = False) def fit(self, X, y = None): self.fit_transform(X) return self", - "docstring": "Fit X into an embedded space. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. If the method is 'exact', X may be a sparse matrix of type 'csr', 'csc' or 'coo'. If the method is 'barnes_hut' and the metric is 'precomputed', X may be a precomputed sparse graph. y : None Ignored. 
Returns ------- self : object Fitted estimator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\manifold\\_t_sne.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Call call:_fit_context Return return:yes" - }, - { - "library": "kornia", - "name": "rot_y", - "source_code": "@classmethod def rot_y(cls, y: Tensor) -> So3: zs = zeros_like(y) return cls.exp(stack((zs, y, zs), -1))", - "docstring": "Construct a z-axis rotation. Args: y: the y-axis rotation angle.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", - "ast_data": "FunctionDef name:rot_y arguments arg:cls arg:y type:Tensor Assign Call call:zeros_like Return return:yes" - }, - { - "library": "pytorch", - "name": "reorder_pre_hook_nodes_to_schedule_asap", - "source_code": "def reorder_pre_hook_nodes_to_schedule_asap(self): for node in self.fx_tracer.graph.find_nodes(op = 'call_function', target = call_hook): if node.kwargs.get('hook_type', None) ! = 'pre_hook': continue getitem_node = node.args[0] input_nodes = self.get_all_nodes(node.args[1]) to_remove = [] to_append = [] hook_block = [node] for n in input_nodes: if n.op = = 'call_function' and n.target = = operator.getitem: to_append.append(n.args[0]) to_remove.append(n) hook_block.append(n) for a, b in zip(to_remove, to_append): input_nodes.remove(a) input_nodes.append(b) arg = max(input_nodes) if arg is not node.prev and (not self.is_placeholder(arg)): arg.append(getitem_node) for n in hook_block: getitem_node.append(n)", - "docstring": "In this function, we schedule the pre hooks as soon as possible. This does not match eager behavior (schedule pre hook right before its registered node), but it can make acc grad be scheduled properly when the pre hooks are registered to them. 
After reordering acc grad node, we will reorder the pre hooks again to mimic eager behavior.", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\compiled_autograd.py", - "ast_data": "FunctionDef name:reorder_pre_hook_nodes_to_schedule_asap arguments arg:self For Call call:find_nodes If Compare op:NotEq Assign Assign Call call:get_all_nodes Assign Assign Assign For If BoolOp Compare op:Eq Compare op:Eq For Call call:zip Assign Call call:max If BoolOp Compare op:IsNot For" - }, - { - "library": "django", - "name": "action", - "source_code": "def action(function = None, *, permissions = None, description = None): def decorator(func): if permissions is not None: func.allowed_permissions = permissions if description is not None: func.short_description = description return func if function is None: return decorator else: return decorator(function)", - "docstring": "Conveniently add attributes to an action function:: @admin.action( permissions=['publish'], description='Mark selected stories as published', ) def make_published(self, request, queryset): queryset.update(status='p') This is equivalent to setting some attributes (with the original, longer names) on the function directly:: def make_published(self, request, queryset): queryset.update(status='p') make_published.allowed_permissions = ['publish'] make_published.short_description = 'Mark selected stories as published'", - "type": "function", - "file_path": "django\\django\\contrib\\admin\\decorators.py", - "ast_data": "FunctionDef name:action arguments arg:function FunctionDef name:decorator arguments arg:func If Compare op:IsNot Assign If Compare op:IsNot Assign Return return:yes If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "generate_kernel_code_from_nodes", - "source_code": "def generate_kernel_code_from_nodes(self, nodes: Sequence[BaseSchedulerNode], benchmark_kernel: bool) -> str: raise NotImplementedError", - "docstring": "Generate a kernel given a list of pre-fused nodes.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\scheduler.py", - "ast_data": "FunctionDef name:generate_kernel_code_from_nodes arguments arg:self arg:nodes type:Sequence[BaseSchedulerNode] arg:benchmark_kernel type:bool Raise raises:NotImplementedError" - }, - { - "library": "django", - "name": "distinct_sql", - "source_code": "def distinct_sql(self, fields, params): if fields: raise NotSupportedError('DISTINCT ON fields is not supported by this database backend') else: return (['DISTINCT'], [])", - "docstring": "Return an SQL DISTINCT clause which removes duplicate rows from the result set. 
If any fields are given, only check the given fields for duplicates.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:distinct_sql arguments arg:self arg:fields arg:params If Raise raises:NotSupportedError('DISTINCT ON fields is not supported by this database backend') Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, maxpool_result, input_var, kernel, padding, stride, dilation, matching_constraint_vars): self.maxpool_result = maxpool_result self.input_var = input_var self.kernel = kernel self.padding = padding self.stride = stride self.dilation = dilation self.matching_constraint = matching_constraint_vars", - "docstring": ":param maxpool_result: the result of maxpool :param input_var: input to convolution :param kernel: kernel tuple", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:maxpool_result arg:input_var arg:kernel arg:padding arg:stride arg:dilation arg:matching_constraint_vars Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "pandas", - "name": "get_na_values", - "source_code": "def get_na_values(col, na_values, na_fvalues, keep_default_na: bool): if isinstance(na_values, dict): if col in na_values: return (na_values[col], na_fvalues[col]) else: if keep_default_na: return (STR_NA_VALUES, set()) return (set(), set()) else: return (na_values, na_fvalues)", - "docstring": "Get the NaN values for a given column. Parameters ---------- col : str The name of the column. na_values : array-like, dict The object listing the NaN values as strings. na_fvalues : array-like, dict The object listing the NaN values as floats. keep_default_na : bool If is a dict, and the column is not mapped in the dictionary, whether to return the default NaN values or the empty set. Returns ------- nan_tuple : A length-two tuple composed of 1) na_values : the string NaN values for that column. 2) na_fvalues : the float NaN values for that column.", - "type": "function", - "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py", - "ast_data": "FunctionDef name:get_na_values arguments arg:col arg:na_values arg:na_fvalues arg:keep_default_na type:bool If Call call:isinstance If Compare op:In Return return:yes If Return return:yes Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "history_map", - "source_code": "@property def history_map(self): return self._history_map", - "docstring": "The map that records all the tensors needed for backprop.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", - "ast_data": "FunctionDef name:history_map arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "static_uniform_row_length", - "source_code": "@property def static_uniform_row_length(self): if self._uniform_row_length is not None: return tensor_util.constant_value(self._uniform_row_length) return None", - "docstring": "The number of values in each row of this partition, if statically known. 
Returns: The number of values in each row of this partition as an (if statically known); or (otherwise).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", - "ast_data": "FunctionDef name:static_uniform_row_length arguments arg:self If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_transform", - "source_code": "def set_transform(self, t): self._transform = t self._transformSet = True self.pchanged() self.stale = True", - "docstring": "Set the artist transform. Parameters ---------- t :", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\artist.py", - "ast_data": "FunctionDef name:set_transform arguments arg:self arg:t Assign Assign Assign" - }, - { - "library": "sphinx", - "name": "doc2path", - "source_code": "def doc2path(self, docname: str, absolute: bool) -> _StrPath: try: filename = self._docname_to_path[docname] except KeyError: filename = Path(docname + self._first_source_suffix) if absolute: return _StrPath(self.srcdir / filename) return _StrPath(filename)", - "docstring": "Return the filename for the document name. If *absolute* is True, return as an absolute path. Else, return as a relative path to the source directory.", - "type": "method", - "file_path": "sphinx\\sphinx\\project.py", - "ast_data": "FunctionDef name:doc2path arguments arg:self arg:docname type:str arg:absolute type:bool Try Assign ExceptHandler Assign Call call:Path If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "clip_by_average_norm", - "source_code": "@deprecation.deprecated(date = None, instructions = 'clip_by_average_norm is deprecated in TensorFlow 2.0. Please use clip_by_norm(t, clip_norm * tf.cast(tf.size(t), tf.float32), name) instead.') @tf_export(v1 = ['clip_by_average_norm']) @dispatch.add_dispatch_support def clip_by_average_norm(t, clip_norm, name = None): with ops.name_scope(name, 'clip_by_average_norm', [t, clip_norm]) as name: t = ops.convert_to_tensor(t, name = 't') n_element = math_ops.cast(array_ops.size(t), dtypes.float32) l2norm_inv = math_ops.rsqrt(math_ops.reduce_sum(t * t, math_ops.range(array_ops.rank(t)))) tclip = array_ops.identity(t * clip_norm * math_ops.minimum(l2norm_inv * n_element, constant_op.constant(1.0) / clip_norm), name = name) return tclip", - "docstring": "Clips tensor values to a maximum average L2-norm. Given a tensor , and a maximum clip value , this operation normalizes so that its average L2-norm is less than or equal to . Specifically, if the average L2-norm is already less than or equal to , then is not modified. If the average L2-norm is greater than , then this operation returns a tensor of the same type and shape as with its values set to: In this case, the average L2-norm of the output tensor is . This operation is typically used to clip gradients before applying them with an optimizer. Args: t: A . clip_norm: A 0-D (scalar) > 0. A maximum clipping value. name: A name for the operation (optional). 
Returns: A clipped .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\clip_ops.py", - "ast_data": "FunctionDef name:clip_by_average_norm arguments arg:t arg:clip_norm arg:name Call call:deprecated Call call:tf_export With Assign Call call:convert_to_tensor Assign Call call:cast Assign Call call:rsqrt Assign Call call:identity Return return:yes" - }, - { - "library": "matplotlib", - "name": "axhspan", - "source_code": "@_docstring.interpd def axhspan(self, ymin, ymax, xmin = 0, xmax = 1, **kwargs): self._check_no_units([xmin, xmax], ['xmin', 'xmax']) (ymin, ymax), = self._process_unit_info([('y', [ymin, ymax])], kwargs) p = mpatches.Rectangle((xmin, ymin), xmax - xmin, ymax - ymin, **kwargs) p.set_transform(self.get_yaxis_transform(which = 'grid')) ix = self.dataLim.intervalx.copy() mx = self.dataLim.minposx self.add_patch(p) self.dataLim.intervalx = ix self.dataLim.minposx = mx p.get_path()._interpolation_steps = mpl.axis.GRIDLINE_INTERPOLATION_STEPS self._request_autoscale_view('y') return p", - "docstring": "Add a horizontal span (rectangle) across the Axes. The rectangle spans from *ymin* to *ymax* vertically, and, by default, the whole x-axis horizontally. The x-span can be set using *xmin* (default: 0) and *xmax* (default: 1) which are in axis units; e.g. `~.Axes.set_xlim~matplotlib.patches.Rectangle~matplotlib.patches.Rectangle` properties %(Rectangle:kwdoc)s See Also -------- axvspan : Add a vertical span across the Axes.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", - "ast_data": "FunctionDef name:axhspan arguments arg:self arg:ymin arg:ymax arg:xmin arg:xmax kwarg:kwargs Assign Call call:_process_unit_info Assign Call call:Rectangle Assign Call call:copy Assign Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "call_method", - "source_code": "@compatibility(is_backward_compatible = True) def call_method(self, method_name: str, args: Optional[tuple['Argument', ...]] = None, kwargs: Optional[dict[str, 'Argument']] = None, type_expr: Optional[Any] = None) -> Node: return self.create_node('call_method', method_name, args, kwargs, type_expr = type_expr)", - "docstring": "Insert a `Graph.create_node`.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\graph.py", - "ast_data": "FunctionDef name:call_method arguments arg:self arg:method_name type:str arg:args type:Optional[tuple['Argument', ...]] arg:kwargs type:Optional[dict[str, 'Argument']] arg:type_expr type:Optional[Any] Call call:compatibility Return return:yes" - }, - { - "library": "tensorflow", - "name": "update_reorders_v2", - "source_code": "def update_reorders_v2(output_file_path): spec = tf_upgrade_v2.TFAPIChangeSpec() reordered_function_names = spec.reordered_function_names need_kwargs_function_names = spec.function_transformers.keys() function_renames = spec.symbol_renames all_reorders = collect_function_arg_names(reordered_function_names, need_kwargs_function_names, function_renames) rename_lines = [get_reorder_line(name, arg_names) for name, arg_names in all_reorders.items()] renames_file_text = '%sreorders = {\\n%s\\n}\\n' % (_FILE_HEADER, ', \\n'.join(sorted(rename_lines))) file_io.write_string_to_file(output_file_path, renames_file_text)", - "docstring": "Writes a Python dictionary mapping function name to argument order. Args: output_file_path: File path to write output to. 
Any existing contents would be replaced.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_reorders_map.py", - "ast_data": "FunctionDef name:update_reorders_v2 arguments arg:output_file_path Assign Call call:TFAPIChangeSpec Assign Assign Call call:keys Assign Assign Call call:collect_function_arg_names Assign Assign" - }, - { - "library": "pandas", - "name": "equals", - "source_code": "def equals(self, other: Any) -> bool: if self.is_(other): return True if not isinstance(other, Index): return False if len(self) ! = len(other): return False if isinstance(self.dtype, StringDtype) and self.dtype.na_value is np.nan and (other.dtype ! = self.dtype): return other.equals(self.astype(object)) if is_object_dtype(self.dtype) and (not is_object_dtype(other.dtype)): return other.equals(self) if isinstance(other, ABCMultiIndex): return other.equals(self) if isinstance(self._values, ExtensionArray): if not isinstance(other, type(self)): return False earr = cast(ExtensionArray, self._data) return earr.equals(other._data) if isinstance(other.dtype, ExtensionDtype): return other.equals(self) return array_equivalent(self._values, other._values)", - "docstring": "Determine if two Index object are equal. The things that are being compared are: * The elements inside the Index object. * The order of the elements inside the Index object. Parameters ---------- other : Any The other object to compare against. Returns ------- bool True if \"other\" is an Index and it has the same elements and order as the calling index; False otherwise. See Also -------- Index.identical: Checks that object attributes and types are also equal. Index.has_duplicates: Check if the Index has duplicate values. Index.is_unique: Return if the index has unique values. Examples -------- >>> idx1 = pd.Index([1, 2, 3]) >>> idx1 Index([1, 2, 3], dtype='int64') >>> idx1.equals(pd.Index([1, 2, 3])) True The elements inside are compared >>> idx2 = pd.Index([\"1\", \"2\", \"3\"]) >>> idx2 Index(['1', '2', '3'], dtype='object') >>> idx1.equals(idx2) False The order is compared >>> ascending_idx = pd.Index([1, 2, 3]) >>> ascending_idx Index([1, 2, 3], dtype='int64') >>> descending_idx = pd.Index([3, 2, 1]) >>> descending_idx Index([3, 2, 1], dtype='int64') >>> ascending_idx.equals(descending_idx) False The dtype is *not* compared >>> int64_idx = pd.Index([1, 2, 3], dtype=\"int64\") >>> int64_idx Index([1, 2, 3], dtype='int64') >>> uint64_idx = pd.Index([1, 2, 3], dtype=\"uint64\") >>> uint64_idx Index([1, 2, 3], dtype='uint64') >>> int64_idx.equals(uint64_idx) True", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\base.py", - "ast_data": "FunctionDef name:equals arguments arg:self arg:other type:Any If Call call:is_ Return return:yes If Return return:yes If Compare op:NotEq Return return:yes If BoolOp Call call:isinstance Compare op:Is Compare op:NotEq Return return:yes If BoolOp Call call:is_object_dtype Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance If Return return:yes Assign Call call:cast Return return:yes If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_comm_counts", - "source_code": "def get_comm_counts(self) -> dict[Any, int]: return self.comm_counts", - "docstring": "Returns the communication counts as a dictionary. 
Returns: Dict[Any, int]: The communication counts as a dictionary.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\tensor\\debug\\_comm_mode.py", - "ast_data": "FunctionDef name:get_comm_counts arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "parse_example_dataset", - "source_code": "@tf_export('data.experimental.parse_example_dataset') @deprecation.deprecated(None, 'Use `tf.data.Dataset.map(tf.io.parse_example(...))` instead.') def parse_example_dataset(features, num_parallel_calls = 1, deterministic = None): if features is None: raise ValueError('Argument `features` is required, but not specified.') def _apply_fn(dataset): out_dataset = _ParseExampleDataset(dataset, features, num_parallel_calls, deterministic) if any((isinstance(feature, parsing_ops.SparseFeature) or isinstance(feature, parsing_ops.RaggedFeature) for feature in features.values())): out_dataset = out_dataset.map(lambda x: parsing_ops._construct_tensors_for_composite_features(features, x), num_parallel_calls = num_parallel_calls) return out_dataset return _apply_fn", - "docstring": "A transformation that parses protos into a of tensors. Parses a number of serialized protos given in . We refer to as a batch with many entries of individual protos. This op parses serialized examples into a dictionary mapping keys to , , and objects. is a dict from keys to , , , and objects. Each and is mapped to a ; each is mapped to a ; and each is mapped to a . See for more details about feature dictionaries. Args: features: A mapping feature keys to , , , and values. num_parallel_calls: (Optional.) A scalar , representing the number of parsing processes to call in parallel. deterministic: (Optional.) A boolean controlling whether determinism should be traded for performance by allowing elements to be produced out of order if some parsing calls complete faster than others. If is , the dataset option ( by default) is used to decide whether to produce elements deterministically. Returns: A dataset transformation function, which can be passed to . Raises: ValueError: if features argument is None.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\parsing_ops.py", - "ast_data": "FunctionDef name:parse_example_dataset arguments arg:features arg:num_parallel_calls arg:deterministic Call call:tf_export Call call:deprecated If Compare op:Is Raise raises:ValueError('Argument `features` is required, but not specified.') FunctionDef name:_apply_fn arguments arg:dataset Assign Call call:_ParseExampleDataset If Call call:any Assign Call call:map Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "update_state", - "source_code": "def update_state(self, y_true, y_pred, sample_weight = None): y_true = math_ops.cast(y_true, self._dtype) y_pred = math_ops.cast(y_pred, self._dtype) [y_true, y_pred], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values([y_true, y_pred], sample_weight) y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(y_pred, y_true) ag_fn = autograph.tf_convert(self._fn, ag_ctx.control_status_ctx()) matches = ag_fn(y_true, y_pred, **self._fn_kwargs) return super(MeanMetricWrapper, self).update_state(matches, sample_weight = sample_weight)", - "docstring": "Accumulates metric statistics. and should have the same shape. Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . sample_weight: Optional acts as a coefficient for the metric. 
If a scalar is provided, then the metric is simply scaled by the given value. If is a tensor of size , then the metric for each sample of the batch is rescaled by the corresponding element in the vector. If the shape of is (or can be broadcasted to this shape), then each metric element of is scaled by the corresponding value of . (Note on : all metric functions reduce by 1 dimension, usually the last axis (-1)). Returns: Update op.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", - "ast_data": "FunctionDef name:update_state arguments arg:self arg:y_true arg:y_pred arg:sample_weight Assign Call call:cast Assign Call call:cast Assign Call call:ragged_assert_compatible_and_get_flat_values Assign Call call:squeeze_or_expand_dimensions Assign Call call:tf_convert Assign Call call:ag_fn Return return:yes" - }, - { - "library": "tensorflow", - "name": "saver", - "source_code": "@property def saver(self): return self._saver", - "docstring": "Return the Saver used by the supervisor. Returns: A Saver object.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", - "ast_data": "FunctionDef name:saver arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_fontconfig_pattern", - "source_code": "def get_fontconfig_pattern(self): return generate_fontconfig_pattern(self)", - "docstring": "Get a fontconfig_ pattern_ suitable for looking up the font as specified with fontconfig's `` utility. This support does not depend on fontconfig; we are merely borrowing its pattern syntax for use here.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", - "ast_data": "FunctionDef name:get_fontconfig_pattern arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "should_redirect_with_slash", - "source_code": "def should_redirect_with_slash(self, request): if settings.APPEND_SLASH and (not request.path_info.endswith('/')): urlconf = getattr(request, 'urlconf', None) if not is_valid_path(request.path_info, urlconf): match = is_valid_path('%s/' % request.path_info, urlconf) if match: view = match.func return getattr(view, 'should_append_slash', True) return False", - "docstring": "Return True if settings.APPEND_SLASH is True and appending a slash to the request path turns an invalid path into a valid one.", - "type": "method", - "file_path": "django\\django\\middleware\\common.py", - "ast_data": "FunctionDef name:should_redirect_with_slash arguments arg:self arg:request If BoolOp Assign Call call:getattr If Assign Call call:is_valid_path If Assign Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "RequestSite", - "source_code": "class RequestSite: def __init__(self, request): self.domain = self.name = request.get_host() def __str__(self): return self.domain def save(self, force_insert = False, force_update = False): raise NotImplementedError('RequestSite cannot be saved.') def delete(self): raise NotImplementedError('RequestSite cannot be deleted.')", - "docstring": "A class that shares the primary interface of Site (i.e., it has `` attributes) but gets its data from an HttpRequest object rather than from a database. 
The save() and delete() methods raise NotImplementedError.", - "type": "class", - "file_path": "django\\django\\contrib\\sites\\requests.py", - "ast_data": "ClassDef name:RequestSite FunctionDef name:__init__ arguments arg:self arg:request Assign Call call:get_host FunctionDef name:__str__ arguments arg:self Return return:yes FunctionDef name:save arguments arg:self arg:force_insert arg:force_update Raise raises:NotImplementedError('RequestSite cannot be saved.') FunctionDef name:delete arguments arg:self Raise raises:NotImplementedError('RequestSite cannot be deleted.')" - }, - { - "library": "mongo", - "name": "raw_response", - "source_code": "def raw_response(self, cursor_id: Optional[int] = None, user_fields: Optional[Mapping[str, Any]] = None) -> list[bytes]: if self.flags & 1: if cursor_id is None: raise ProtocolError('No cursor id for getMore operation') msg = 'Cursor not found, cursor id: %d' % (cursor_id,) errobj = {'ok': 0, 'errmsg': msg, 'code': 43} raise CursorNotFound(msg, 43, errobj) elif self.flags & 2: error_object: dict = bson.BSON(self.documents).decode() error_object.setdefault('ok', 0) if error_object['$err'].startswith(HelloCompat.LEGACY_ERROR): raise NotPrimaryError(error_object['$err'], error_object) elif error_object.get('code') = = 50: default_msg = 'operation exceeded time limit' raise ExecutionTimeout(error_object.get('$err', default_msg), error_object.get('code'), error_object) raise OperationFailure('database error: %s' % error_object.get('$err'), error_object.get('code'), error_object) if self.documents: return [self.documents] return []", - "docstring": "Check the response header from the database, without decoding BSON. Check the response for errors and unpack. Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. :param cursor_id: cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response.", - "type": "method", - "file_path": "mongo\\pymongo\\message.py", - "ast_data": "FunctionDef name:raw_response arguments arg:self arg:cursor_id type:Optional[int] arg:user_fields type:Optional[Mapping[str, Any]] If If Compare op:Is Raise raises:ProtocolError('No cursor id for getMore operation') Assign Assign Raise raises:CursorNotFound(msg, 43, errobj) If If Call call:startswith Raise raises:NotPrimaryError(error_object['$err'], error_object) If Compare op:Eq Assign Raise raises:ExecutionTimeout(error_object.get('$err', default_msg), error_object.get('code'), error_object) Raise raises:OperationFailure('database error: %s' % error_object.get('$err'), error_object.get('code'), error_object) If Return return:yes Return return:yes" - }, - { - "library": "algorithms", - "name": "__init__", - "source_code": "def __init__(self, v1, v2): self.queue = [_ for _ in (v1, v2) if _] print(self.queue)", - "docstring": "Initialize your data structure here. 
:type v1: List[int] :type v2: List[int]", - "type": "method", - "file_path": "algorithms\\algorithms\\queues\\zigzagiterator.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:v1 arg:v2 Assign" - }, - { - "library": "pandas", - "name": "strings_with_wrong_placed_whitespace", - "source_code": "def strings_with_wrong_placed_whitespace(file_obj: IO[str]) -> Iterable[tuple[int, str]]: def has_wrong_whitespace(first_line: str, second_line: str) -> bool: if first_line.endswith('\\\\n'): return False elif first_line.startswith(' ') or second_line.startswith(' '): return False elif first_line.endswith(' ') or second_line.endswith(' '): return False elif not first_line.endswith(' ') and second_line.startswith(' '): return True return False tokens: list = list(tokenize.generate_tokens(file_obj.readline)) for first_token, second_token, third_token in zip(tokens, tokens[1:], tokens[2:]): if first_token.type = = third_token.type = = token.STRING and second_token.type = = token.NL: first_string: str = first_token.string[_get_literal_string_prefix_len(first_token.string) + 1: -1] second_string: str = third_token.string[_get_literal_string_prefix_len(third_token.string) + 1: -1] if has_wrong_whitespace(first_string, second_string): yield (third_token.start[0], 'String has a space at the beginning instead of the end of the previous string.')", - "docstring": "Test case for leading spaces in concated strings. For example: >>> rule = ( ... \"We want the space at the end of the line, \" ... \"not at the beginning\" ... ) Instead of: >>> rule = ( ... \"We want the space at the end of the line,\" ... \" not at the beginning\" ... ) Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of unconcatenated string. 
msg : str Explanation of the error.", - "type": "function", - "file_path": "pandas\\scripts\\validate_unwanted_patterns.py", - "ast_data": "FunctionDef name:strings_with_wrong_placed_whitespace arguments arg:file_obj type:IO[str] FunctionDef name:has_wrong_whitespace arguments arg:first_line type:str arg:second_line type:str If Call call:endswith Return return:yes If BoolOp Call call:startswith Call call:startswith Return return:yes If BoolOp Call call:endswith Call call:endswith Return return:yes If BoolOp Call call:startswith Return return:yes Return return:yes For Call call:zip If BoolOp Compare op:Eq op:Eq Compare op:Eq If Call call:has_wrong_whitespace" - }, - { - "library": "tensorflow", - "name": "get", - "source_code": "def get(self, token, default = None): return self._funcs.get(token, default)", - "docstring": "Gets the registered function corresponding to .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\script_ops.py", - "ast_data": "FunctionDef name:get arguments arg:self arg:token arg:default Return return:yes" - }, - { - "library": "tensorflow", - "name": "sess_str", - "source_code": "@property def sess_str(self): raise NotImplementedError('sess_str')", - "docstring": "The TensorFlow process to which this session will connect.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", - "ast_data": "FunctionDef name:sess_str arguments arg:self Raise raises:NotImplementedError('sess_str')" - }, - { - "library": "scikit-learn", - "name": "predict", - "source_code": "def predict(self, X): check_is_fitted(self, attributes = ['_label_binarizer']) if self._label_binarizer.y_type_.startswith('multilabel'): scores = 2 * (self.decision_function(X) > 0) - 1 return self._label_binarizer.inverse_transform(scores) return super().predict(X)", - "docstring": "Predict class labels for samples in . Parameters ---------- X : {array-like, spare matrix} of shape (n_samples, n_features) The data matrix for which we want to predict the targets. Returns ------- y_pred : ndarray of shape (n_samples,) or (n_samples, n_outputs) Vector or matrix containing the predictions. In binary and multiclass problems, this is a vector containing . In a multilabel problem, it returns a matrix of shape .", - "type": "method", - "file_path": "scikit-learn\\sklearn\\linear_model\\_ridge.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X If Call call:startswith Assign Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "Ackley01", - "source_code": "class Ackley01(Benchmark): change_dimensionality = True def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-35.0] * self.N, [35.0] * self.N)) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 u = sum(x ** 2) v = sum(cos(2 * pi * x)) return -20.0 * exp(-0.2 * sqrt(u / self.N)) - exp(v / self.N) + 20.0 + exp(1.0)", - "docstring": "Ackley01 objective function. The Ackley01 [1]_ global optimization problem is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Ackley01}}(x) = -20 e^{-0.2 \\sqrt{\\frac{1}{n} \\sum_{i=1}^n x_i^2}} - e^{\\frac{1}{n} \\sum_{i=1}^n \\cos(2 \\pi x_i)} + 20 + e Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Adorio, E. 
MVF - \"Multivariate Test Functions Library in C for Unconstrained Global Optimization\", 2005 TODO: the -0.2 factor in the exponent of the first term is given as -0.02 in Jamil et al.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_A.py", - "ast_data": "ClassDef name:Ackley01 Assign FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Call call:sum Assign Call call:sum Return return:yes" - }, - { - "library": "django", - "name": "merge_dicts", - "source_code": "@staticmethod def merge_dicts(dicts): merged = {} for d in reversed(dicts): merged.update(d) return merged", - "docstring": "Merge dicts in reverse to preference the order of the original list. e.g., merge_dicts([a, b]) will preference the keys in 'a' over those in 'b'.", - "type": "method", - "file_path": "django\\django\\db\\models\\query_utils.py", - "ast_data": "FunctionDef name:merge_dicts arguments arg:dicts Assign For Call call:reversed Return return:yes" - }, - { - "library": "tensorflow", - "name": "get", - "source_code": "def get(self): try: while self.is_running(): inputs = self.queue.get(block = True).get() self.queue.task_done() if inputs is not None: yield inputs except StopIteration: last_ones = [] while self.queue.qsize() > 0: last_ones.append(self.queue.get(block = True)) for f in last_ones: f.wait() last_ones = [future.get() for future in last_ones if future.successful()] for inputs in last_ones: if inputs is not None: yield inputs except Exception as e: self.stop() if 'generator already executing' in str(e): raise RuntimeError('Your generator is NOT thread-safe. Keras requires a thread-safe generator when `use_multiprocessing = False, workers > 1`. ') raise e", - "docstring": "Creates a generator to extract data from the queue. Skip the data if it is . Yields: The next element in the queue, i.e. a tuple or .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", - "ast_data": "FunctionDef name:get arguments arg:self Try While Call call:is_running Assign Call call:get If Compare op:IsNot ExceptHandler Assign While Compare op:Gt For Assign For If Compare op:IsNot ExceptHandler If Compare op:In Raise raises:RuntimeError('Your generator is NOT thread-safe. Keras requires a thread-safe generator when `use_multiprocessing=False, workers > 1`. ') Raise raises:e" - }, - { - "library": "numpy", - "name": "argmax", - "source_code": "def argmax(self, axis = None, out = None): return N.ndarray.argmax(self, axis, out)._align(axis)", - "docstring": "Indexes of the maximum values along an axis. Return the indexes of the first occurrences of the maximum values along the specified axis. If axis is None, the index is for the flattened matrix. Parameters ---------- See for complete descriptions See Also -------- numpy.argmax Notes ----- This is the same as , but returns a object where would return an . 
Examples -------- >>> x = np.matrix(np.arange(12).reshape((3,4))); x matrix([[ 0, 1, 2, 3], [ 4, 5, 6, 7], [ 8, 9, 10, 11]]) >>> x.argmax() 11 >>> x.argmax(0) matrix([[2, 2, 2, 2]]) >>> x.argmax(1) matrix([[3], [3], [3]])", - "type": "method", - "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", - "ast_data": "FunctionDef name:argmax arguments arg:self arg:axis arg:out Return return:yes" - }, - { - "library": "tensorflow", - "name": "shape", - "source_code": "@property def shape(self): raise NotImplementedError", - "docstring": "The of this variable. Returns: A .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", - "ast_data": "FunctionDef name:shape arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "sphinx", - "name": "visit_ImportFrom", - "source_code": "def visit_ImportFrom(self, node: ast.ImportFrom) -> None: for name in node.names: self.add_entry(name.asname or name.name) if node.module not in {'typing', 'typing_extensions'}: continue if name.name = = 'final': self.typing_final_names.add(name.asname or name.name) elif name.name = = 'overload': self.typing_overload_names.add(name.asname or name.name)", - "docstring": "Handles Import node and record the order of definitions.", - "type": "method", - "file_path": "sphinx\\sphinx\\pycode\\parser.py", - "ast_data": "FunctionDef name:visit_ImportFrom arguments arg:self arg:node type:ast.ImportFrom For If Compare op:NotIn If Compare op:Eq If Compare op:Eq" - }, - { - "library": "scipy", - "name": "sh_chebyt", - "source_code": "def sh_chebyt(n, monic = False): base = sh_jacobi(n, 0.0, 0.5, monic = monic) if monic: return base if n > 0: factor = 4 ** n / 2.0 else: factor = 1.0 base._scale(factor) return base", - "docstring": "Shifted Chebyshev polynomial of the first kind. Defined as :math: for :math: the nth Chebyshev polynomial of the first kind. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- T : orthopoly1d Shifted Chebyshev polynomial of the first kind. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:.", - "type": "function", - "file_path": "scipy\\scipy\\special\\_orthogonal.py", - "ast_data": "FunctionDef name:sh_chebyt arguments arg:n arg:monic Assign Call call:sh_jacobi If Return return:yes If Compare op:Gt Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "import_to_tensorboard", - "source_code": "def import_to_tensorboard(model_dir, log_dir, tag_set): with session.Session(graph = ops.Graph()) as sess: input_graph_def = saved_model_utils.get_meta_graph_def(model_dir, tag_set).graph_def importer.import_graph_def(input_graph_def) pb_visual_writer = summary.FileWriter(log_dir) pb_visual_writer.add_graph(sess.graph) print('Model Imported. Visualize by running: tensorboard --logdir = {}'.format(log_dir))", - "docstring": "View an SavedModel as a graph in Tensorboard. Args: model_dir: The directory containing the SavedModel to import. log_dir: The location for the Tensorboard log to begin visualization from. tag_set: Group of tag(s) of the MetaGraphDef to load, in string format, separated by ','. For tag-set contains multiple tags, all tags must be passed in. Usage: Call this function with your SavedModel location and desired log directory. Launch Tensorboard by pointing it to the log directory. 
View your imported SavedModel as a graph.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\tools\\import_pb_to_tensorboard.py", - "ast_data": "FunctionDef name:import_to_tensorboard arguments arg:model_dir arg:log_dir arg:tag_set With Assign Assign Call call:FileWriter" - }, - { - "library": "cherrypy", - "name": "unsubscribe", - "source_code": "def unsubscribe(self): self.bus.unsubscribe('start', self.start) self.bus.unsubscribe('stop', self.stop)", - "docstring": "Unsubcribe control methods to the bus lifecycle events.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\process\\servers.py", - "ast_data": "FunctionDef name:unsubscribe arguments arg:self" - }, - { - "library": "mongo", - "name": "index", - "source_code": "def index(self, idx: int) -> int: return self.index_map[idx]", - "docstring": "Get the original index of an operation in this run. :param idx: The Run index that maps to the original index.", - "type": "method", - "file_path": "mongo\\pymongo\\bulk_shared.py", - "ast_data": "FunctionDef name:index arguments arg:self arg:idx type:int Return return:yes" - }, - { - "library": "pandas", - "name": "has_dropped_na", - "source_code": "@final @cache_readonly def has_dropped_na(self) -> bool: return bool((self.ids < 0).any())", - "docstring": "Whether grouper has null value(s) that are dropped.", - "type": "method", - "file_path": "pandas\\pandas\\core\\groupby\\ops.py", - "ast_data": "FunctionDef name:has_dropped_na arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "add_delete", - "source_code": "def add_delete(self, namespace: str, selector: Mapping[str, Any], multi: bool, collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None) -> None: cmd = {'delete': -1, 'filter': selector, 'multi': multi} if hint is not None: cmd['hint'] = hint if collation is not None: self.uses_collation = True cmd['collation'] = collation if multi: self.is_retryable = False self.ops.append(('delete', cmd)) self.namespaces.append(namespace) self.total_ops + = 1", - "docstring": "Create a delete document and add it to the list of ops.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\client_bulk.py", - "ast_data": "FunctionDef name:add_delete arguments arg:self arg:namespace type:str arg:selector type:Mapping[str, Any] arg:multi type:bool arg:collation type:Optional[Mapping[str, Any]] arg:hint type:Union[str, dict[str, Any], None] Assign If Compare op:IsNot Assign If Compare op:IsNot Assign Assign If Assign" - }, - { - "library": "mongo", - "name": "close", - "source_code": "async def close(self) -> None: self.client_ref = None self.key_vault_coll = None if self.mongocryptd_client: await self.mongocryptd_client.close() self.mongocryptd_client = None", - "docstring": "Release resources. 
Note it is not safe to call this method from __del__ or any GC hooks.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\encryption.py", - "ast_data": "AsyncFunctionDef name:close arguments arg:self Assign Assign If Assign" - }, - { - "library": "coconut", - "name": "pretty_req", - "source_code": "def pretty_req(req): if isinstance(req, tuple): base_req, env_marker = req else: base_req, env_marker = (req, None) return base_req + (' (' + env_marker + ')' if env_marker else '')", - "docstring": "Get a string representation of the given requirement.", - "type": "function", - "file_path": "coconut\\coconut\\requirements.py", - "ast_data": "FunctionDef name:pretty_req arguments arg:req If Call call:isinstance Assign Assign Return return:yes" - }, - { - "library": "scipy", - "name": "assert_almost_equal", - "source_code": "def assert_almost_equal(actual, desired, decimal = 7, *args, **kwds): rtol, atol = (0, 1.5 * 10 ** (-decimal)) return xp_assert_close(actual, desired, *args, atol = atol, rtol = rtol, check_dtype = False, check_shape = False, **kwds)", - "docstring": "Backwards compatible replacement. In new code, use xp_assert_close instead.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\_array_api_no_0d.py", - "ast_data": "FunctionDef name:assert_almost_equal arguments arg:actual arg:desired arg:decimal vararg:args kwarg:kwds Assign Return return:yes" - }, - { - "library": "kornia", - "name": "Normalize", - "source_code": "class Normalize(IntensityAugmentationBase2D): def __init__(self, mean: Tensor | tuple[float, ...] | list[float] | float, std: Tensor | tuple[float, ...] | list[float] | float, p: float = 1.0, keepdim: bool = False) -> None: super().__init__(p = p, same_on_batch = True, keepdim = keepdim) if isinstance(mean, (int, float)): mean = torch.tensor([mean]) if isinstance(std, (int, float)): std = torch.tensor([std]) if isinstance(mean, (tuple, list)): mean = torch.tensor(mean) if isinstance(std, (tuple, list)): std = torch.tensor(std) self.flags = {'mean': mean, 'std': std} def apply_transform(self, input: Tensor, params: dict[str, Tensor], flags: dict[str, Any], transform: Optional[Tensor] = None) -> Tensor: return normalize(input, flags['mean'], flags['std'])", - "docstring": "Normalize tensor images with mean and standard deviation. .. math:: \\text{input[channel] = (input[channel] - mean[channel]) / std[channel]} Where is :math: and :math: for channels, Args: mean: Mean for each channel. std: Standard deviations for each channel. p: probability of applying the transformation. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Return: Normalised tensor with same size as input :math:. .. note:: This function internally uses :func:. Examples: >>> norm = Normalize(mean=torch.zeros(4), std=torch.ones(4)) >>> x = torch.rand(1, 4, 3, 3) >>> out = norm(x) >>> out.shape torch.Size([1, 4, 3, 3])", - "type": "class", - "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\normalize.py", - "ast_data": "ClassDef name:Normalize FunctionDef name:__init__ arguments arg:self arg:mean type:Tensor | tuple[float, ...] | list[float] | float arg:std type:Tensor | tuple[float, ...] 
| list[float] | float arg:p type:float arg:keepdim type:bool If Call call:isinstance Assign Call call:tensor If Call call:isinstance Assign Call call:tensor If Call call:isinstance Assign Call call:tensor If Call call:isinstance Assign Call call:tensor Assign FunctionDef name:apply_transform arguments arg:self arg:input type:Tensor arg:params type:dict[str, Tensor] arg:flags type:dict[str, Any] arg:transform type:Optional[Tensor] Return return:yes" - }, - { - "library": "mongo", - "name": "update_search_index", - "source_code": "async def update_search_index(self, name: str, definition: Mapping[str, Any], session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None, **kwargs: Any) -> None: cmd = {'updateSearchIndex': self._name, 'name': name, 'definition': definition} cmd.update(kwargs) if comment is not None: cmd['comment'] = comment async with await self._conn_for_writes(session, operation = _Op.UPDATE_SEARCH_INDEX) as conn: await self._command(conn, cmd, read_preference = ReadPreference.PRIMARY, allowable_errors = ['ns not found', 26], codec_options = _UNICODE_REPLACE_CODEC_OPTIONS)", - "docstring": "Update a search index by replacing the existing index definition with the provided definition. :param name: The name of the search index to be updated. :param definition: The new search index definition. :param session: a :class:. :param comment: A user-provided comment to attach to this command. :param kwargs: optional arguments to the updateSearchIndexes command (like maxTimeMS) can be passed as keyword arguments. .. note:: requires a MongoDB server version 7.0+ Atlas cluster. .. versionadded:: 4.5", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\collection.py", - "ast_data": "AsyncFunctionDef name:update_search_index arguments arg:self arg:name type:str arg:definition type:Mapping[str, Any] arg:session type:Optional[AsyncClientSession] arg:comment type:Optional[Any] kwarg:kwargs Assign If Compare op:IsNot Assign" - }, - { - "library": "django", - "name": "get_files", - "source_code": "def get_files(storage, ignore_patterns = None, location = ''): if ignore_patterns is None: ignore_patterns = [] directories, files = storage.listdir(location) for fn in files: if matches_patterns(fn, ignore_patterns): continue if location: fn = os.path.join(location, fn) if matches_patterns(fn, ignore_patterns): continue yield fn for dir in directories: if matches_patterns(dir, ignore_patterns): continue if location: dir = os.path.join(location, dir) yield from get_files(storage, ignore_patterns, dir)", - "docstring": "Recursively walk the storage directories yielding the paths of all files that should be copied.", - "type": "function", - "file_path": "django\\django\\contrib\\staticfiles\\utils.py", - "ast_data": "FunctionDef name:get_files arguments arg:storage arg:ignore_patterns arg:location If Compare op:Is Assign Assign Call call:listdir For If Call call:matches_patterns If Assign Call call:join If Call call:matches_patterns For If Call call:matches_patterns If Assign Call call:join" - }, - { - "library": "pandas", - "name": "is_complex_dtype", - "source_code": "def is_complex_dtype(arr_or_dtype) -> bool: return _is_dtype_type(arr_or_dtype, classes(np.complexfloating))", - "docstring": "Check whether the provided array or dtype is of a complex dtype. Parameters ---------- arr_or_dtype : array-like or dtype The array or dtype to check. Returns ------- boolean Whether or not the array or dtype is of a complex dtype. 
See Also -------- api.types.is_complex: Return True if given object is complex. api.types.is_numeric_dtype: Check whether the provided array or dtype is of a numeric dtype. api.types.is_integer_dtype: Check whether the provided array or dtype is of an integer dtype. Examples -------- >>> from pandas.api.types import is_complex_dtype >>> is_complex_dtype(str) False >>> is_complex_dtype(int) False >>> is_complex_dtype(np.complex128) True >>> is_complex_dtype(np.array([\"a\", \"b\"])) False >>> is_complex_dtype(pd.Series([1, 2])) False >>> is_complex_dtype(np.array([1 + 1j, 5])) True", - "type": "function", - "file_path": "pandas\\pandas\\core\\dtypes\\common.py", - "ast_data": "FunctionDef name:is_complex_dtype arguments arg:arr_or_dtype Return return:yes" - }, - { - "library": "scipy", - "name": "itilbert", - "source_code": "def itilbert(x, h, period = None, _cache = _cache): if isinstance(_cache, threading.local): if not hasattr(_cache, 'itilbert_cache'): _cache.itilbert_cache = {} _cache = _cache.itilbert_cache tmp = asarray(x) if iscomplexobj(tmp): return itilbert(tmp.real, h, period, _cache) + 1j * itilbert(tmp.imag, h, period, _cache) if period is not None: h = h * 2 * pi / period n = len(x) omega = _cache.get((n, h)) if omega is None: if len(_cache) > 20: while _cache: _cache.popitem() def kernel(k, h = h): if k: return -tanh(h * k) return 0 omega = convolve.init_convolution_kernel(n, kernel, d = 1) _cache[n, h] = omega overwrite_x = _datacopied(tmp, x) return convolve.convolve(tmp, omega, swap_real_imag = 1, overwrite_x = overwrite_x)", - "docstring": "Return inverse h-Tilbert transform of a periodic sequence x. If `tilbert`.", - "type": "function", - "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py", - "ast_data": "FunctionDef name:itilbert arguments arg:x arg:h arg:period arg:_cache If Call call:isinstance If Assign Assign Assign Call call:asarray If Call call:iscomplexobj Return return:yes If Compare op:IsNot Assign Assign Call call:len Assign Call call:get If Compare op:Is If Compare op:Gt While FunctionDef name:kernel arguments arg:k arg:h If Return return:yes Return return:yes Assign Call call:init_convolution_kernel Assign Assign Call call:_datacopied Return return:yes" - }, - { - "library": "pytorch", - "name": "load_state_dict", - "source_code": "def load_state_dict(self, state_dict: dict[str, Any]) -> None: ...", - "docstring": "Restore the object's state from the provided state_dict. Args: state_dict: The state dict to restore from", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\stateful.py", - "ast_data": "FunctionDef name:load_state_dict arguments arg:self arg:state_dict type:dict[str, Any]" - }, - { - "library": "scipy", - "name": "pdf", - "source_code": "def pdf(self, x): return np.exp(self.logpdf(x))", - "docstring": "Parameters ---------- x : array_like Points at which to evaluate the log of the probability density function. The last axis of must correspond to unit vectors of the same dimensionality as the distribution. 
Returns ------- pdf : ndarray or scalar Probability density function evaluated at .", "type": "method", "file_path": "scipy\\scipy\\stats\\_multivariate.py", "ast_data": "FunctionDef name:pdf arguments arg:self arg:x Return return:yes" }, { "library": "scipy", "name": "polynomial_matrix", "source_code": "def polynomial_matrix(x, powers, out): for i in range(x.shape[0]): for j in range(powers.shape[0]): out[i, j] = np.prod(x[i] ** powers[j])", "docstring": "Evaluate monomials, with exponents from , at .", "type": "function", "file_path": "scipy\\scipy\\interpolate\\_rbfinterp_pythran.py", "ast_data": "FunctionDef name:polynomial_matrix arguments arg:x arg:powers arg:out For Call call:range For Call call:range Assign Call call:prod" }, { "library": "pytorch", "name": "linear_inference_rule", "source_code": "@register_inference_rule(torch.nn.Linear) def linear_inference_rule(n: Node, module_instance): assert isinstance(n.args[0], Node) if n.args[0].type == Dyn and isinstance(n.type, TensorType): n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__)) if isinstance(n.args[0].type, TensorType): output_type = linear_check(n.args[0].type, module_instance) n.type = get_greatest_upper_bound(output_type, n.type) return n.type", "docstring": "Applies the shape information to the input then gets the greatest upper bound of the resulting type and the existing type", "type": "function", "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", "ast_data": "FunctionDef name:linear_inference_rule arguments arg:n type:Node arg:module_instance Call call:register_inference_rule If BoolOp Compare op:Eq Call call:isinstance Assign Call call:expand_to_tensor_dim If Call call:isinstance Assign Call call:linear_check Assign Call call:get_greatest_upper_bound Return return:yes" }, { "library": "mongo", "name": "write_concern", "source_code": "@property def write_concern(self) -> Optional[WriteConcern]: return self._write_concern", "docstring": "This transaction's :class:.", "type": "method", "file_path": "mongo\\pymongo\\synchronous\\client_session.py", "ast_data": "FunctionDef name:write_concern arguments arg:self Return return:yes" }, { "library": "salmon", "name": "render_pep440_branch", "source_code": "def render_pep440_branch(pieces: Dict[str, Any]) -> str: if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: if pieces['branch'] != 'master': rendered += '.dev0' rendered += plus_or_dot(pieces) rendered += '%d.g%s' % (pieces['distance'], pieces['short']) if pieces['dirty']: rendered += '.dirty' else: rendered = '0' if pieces['branch'] != 'master': rendered += '.dev0' rendered += '+untagged.%d.g%s' % (pieces['distance'], pieces['short']) if pieces['dirty']: rendered += '.dirty' return rendered", "docstring": "TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The \".dev0\" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear \"older\" than the master branch). Exceptions: 1: no tags.
0[.dev0]+untagged.DISTANCE.gHEX[.dirty]", - "type": "function", - "file_path": "salmon\\versioneer.py", - "ast_data": "FunctionDef name:render_pep440_branch arguments arg:pieces type:Dict[str, Any] If Assign If BoolOp If Compare op:NotEq If Assign If Compare op:NotEq If Return return:yes" - }, - { - "library": "coconut", - "name": "in_incremental_mode", - "source_code": "def in_incremental_mode(): return ParserElement._incrementalEnabled and (not ParserElement._incrementalWithResets)", - "docstring": "Determine if we are using incremental parsing mode.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:in_incremental_mode arguments Return return:yes" - }, - { - "library": "django", - "name": "exclude", - "source_code": "def exclude(self, *args, **kwargs): self._not_support_combined_queries('exclude') return self._filter_or_exclude(True, args, kwargs)", - "docstring": "Return a new QuerySet instance with NOT (args) ANDed to the existing set.", - "type": "method", - "file_path": "django\\django\\db\\models\\query.py", - "ast_data": "FunctionDef name:exclude arguments arg:self vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, tpu_hardware_feature_proto): self.tpu_hardware_feature_proto = tpu_hardware_feature_proto", - "docstring": "Store TPU hardware feature info. Args: tpu_hardware_feature_proto: protobuf which describe the tpu hardware feature.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:tpu_hardware_feature_proto Assign" - }, - { - "library": "mongo", - "name": "collection_info", - "source_code": "def collection_info(self, database: str, filter: bytes) -> Optional[list[bytes]]: with self.client_ref()[database].list_collections(filter = RawBSONDocument(filter)) as cursor: return [_dict_to_bson(doc, False, _DATA_KEY_OPTS) for doc in cursor]", - "docstring": "Get the collection info for a namespace. The returned collection info is passed to libmongocrypt which reads the JSON schema. :param database: The database on which to run listCollections. :param filter: The filter to pass to listCollections. :return: All documents from the listCollections command response as BSON.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\encryption.py", - "ast_data": "FunctionDef name:collection_info arguments arg:self arg:database type:str arg:filter type:bytes With Return return:yes" - }, - { - "library": "scikit-learn", - "name": "diag", - "source_code": "def diag(self, X): return np.full(_num_samples(X), self.noise_level, dtype = np.array(self.noise_level).dtype)", - "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Argument to the kernel. 
Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)", - "type": "method", - "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", - "ast_data": "FunctionDef name:diag arguments arg:self arg:X Return return:yes" - }, - { - "library": "scrapy", - "name": "follow_all", - "source_code": "def follow_all(self, urls: Iterable[str | Link], callback: CallbackT | None = None, method: str = 'GET', headers: Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None = None, body: bytes | str | None = None, cookies: CookiesT | None = None, meta: dict[str, Any] | None = None, encoding: str | None = 'utf-8', priority: int = 0, dont_filter: bool = False, errback: Callable[[Failure], Any] | None = None, cb_kwargs: dict[str, Any] | None = None, flags: list[str] | None = None) -> Iterable[Request]: if not hasattr(urls, '__iter__'): raise TypeError(\"'urls' argument must be an iterable\") return (self.follow(url = url, callback = callback, method = method, headers = headers, body = body, cookies = cookies, meta = meta, encoding = encoding, priority = priority, dont_filter = dont_filter, errback = errback, cb_kwargs = cb_kwargs, flags = flags) for url in urls)", - "docstring": ".. versionadded:: 2.0 Return an iterable of :class: instances to follow all links in `~scrapy.link.Link~.TextResponse~.TextResponse.follow_all` method which supports selectors in addition to absolute/relative URLs and Link objects.", - "type": "method", - "file_path": "scrapy\\scrapy\\http\\response\\__init__.py", - "ast_data": "FunctionDef name:follow_all arguments arg:self arg:urls type:Iterable[str | Link] arg:callback type:CallbackT | None arg:method type:str arg:headers type:Mapping[AnyStr, Any] | Iterable[tuple[AnyStr, Any]] | None arg:body type:bytes | str | None arg:cookies type:CookiesT | None arg:meta type:dict[str, Any] | None arg:encoding type:str | None arg:priority type:int arg:dont_filter type:bool arg:errback type:Callable[[Failure], Any] | None arg:cb_kwargs type:dict[str, Any] | None arg:flags type:list[str] | None If Raise raises:TypeError(\"'urls' argument must be an iterable\") Return return:yes" - }, - { - "library": "pytorch", - "name": "validate_model", - "source_code": "def validate_model(self, model, example_inputs): model = self.deepcopy_model(model) example_inputs = clone_inputs(example_inputs) model, example_inputs = self.cast_based_on_args(model, example_inputs) try: self.model_iter_fn(model, example_inputs) except Exception as e: raise RuntimeError('Eager run failed') from e", - "docstring": "Runs the eager model with example inputs to ensure that eager passes.", - "type": "method", - "file_path": "pytorch\\benchmarks\\dynamo\\common.py", - "ast_data": "FunctionDef name:validate_model arguments arg:self arg:model arg:example_inputs Assign Call call:deepcopy_model Assign Call call:clone_inputs Assign Call call:cast_based_on_args Try ExceptHandler Raise raises:RuntimeError('Eager run failed')" - }, - { - "library": "algorithms", - "name": "Kosaraju", - "source_code": "class Kosaraju: def dfs(self, i, V, adj, visited, stk): visited[i] = 1 for x in adj[i]: if visited[x] = = -1: self.dfs(x, V, adj, visited, stk) stk.append(i) def kosaraju(self, V, adj): stk, visited = ([], [-1] * (V + 1)) for i in range(V): if visited[i] = = -1: self.dfs(i, V, adj, visited, stk) stk.reverse() res = stk.copy() ans, visited1 = (0, [-1] * (V + 1)) adj1 = [[] for x in range(V)] for i in range(len(adj)): for x in adj[i]: adj1[x].append(i) for i in range(len(res)): if visited1[res[i]] = = 
-1: ans + = 1 self.dfs(res[i], V, adj1, visited1, stk) return ans", - "docstring": "Kosaraju's algorithm use depth first search approach to find strongly connected components in a directed graph. Approach: 1. Make a DFS call to keep track of finish time of each vertex. 2. Tranpose the original graph. ie 1->2 transpose is 1<-2 3. Make another DFS call to calculate strongly connected components.", - "type": "class", - "file_path": "algorithms\\algorithms\\graph\\strongly_connected_components_kosaraju.py", - "ast_data": "ClassDef name:Kosaraju FunctionDef name:dfs arguments arg:self arg:i arg:V arg:adj arg:visited arg:stk Assign For If Compare op:Eq FunctionDef name:kosaraju arguments arg:self arg:V arg:adj Assign For Call call:range If Compare op:Eq Assign Call call:copy Assign Assign For Call call:range For For Call call:range If Compare op:Eq Return return:yes" - }, - { - "library": "pytorch", - "name": "parse_settings_from_text", - "source_code": "def parse_settings_from_text(settings_text: str) -> Settings: try: if settings_text: backtick = chr(96) settings_text = settings_text.strip(f'\\r\\n\\t{backtick} ') settings = load_yaml(settings_text) experiments = {} for exp_name, exp_settings in settings.get(SETTING_EXPERIMENTS).items(): if not is_valid_experiment_name(exp_name): continue valid_settings = {} for setting in exp_settings: if setting not in Experiment._fields: log.warning(f'Unexpected setting in experiment: {setting} = {exp_settings[setting]}') else: valid_settings[setting] = exp_settings[setting] experiments[exp_name] = Experiment(**valid_settings) return Settings(experiments) except Exception: log.exception('Failed to parse settings') return Settings()", - "docstring": "Parse the experiments from the issue body into a list of ExperimentSettings", - "type": "function", - "file_path": "pytorch\\.github\\scripts\\runner_determinator.py", - "ast_data": "FunctionDef name:parse_settings_from_text arguments arg:settings_text type:str Try If Assign Call call:chr Assign Call call:strip Assign Call call:load_yaml Assign For Call call:items If Assign For If Compare op:NotIn Assign Assign Call call:Experiment Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "register_module_backward_hook", - "source_code": "def register_module_backward_hook(hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]) -> RemovableHandle: global _global_is_full_backward_hook if _global_is_full_backward_hook is True: raise RuntimeError('Cannot use both regular backward hooks and full backward hooks as a global Module hook. Please use only one of them.') _global_is_full_backward_hook = False handle = RemovableHandle(_global_backward_hooks) _global_backward_hooks[handle.id] = hook return handle", - "docstring": "Register a backward hook common to all the modules. This function is deprecated in favor of :func: and the behavior of this function will change in future versions. Returns: :class:: a handle that can be used to remove the added hook by calling ``", - "type": "function", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:register_module_backward_hook arguments arg:hook type:Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]] If Compare op:Is Raise raises:RuntimeError('Cannot use both regular backward hooks and full backward hooks as a global Module hook. 
Please use only one of them.') Assign Assign Call call:RemovableHandle Assign Return return:yes" }, { "library": "virtualenv", "name": "path_exe_finder", "source_code": "def path_exe_finder(spec: PythonSpec) -> Callable[[Path], Generator[tuple[Path, bool], None, None]]: pat = spec.generate_re(windows = sys.platform == 'win32') direct = spec.str_spec if sys.platform == 'win32': direct = f'{direct}.exe' def path_exes(path: Path) -> Generator[tuple[Path, bool], None, None]: direct_path = path / direct if direct_path.exists(): yield (direct_path, False) for exe in path.iterdir(): match = pat.fullmatch(exe.name) if match: yield (exe.absolute(), match['impl'] == 'python') return path_exes", "docstring": "Given a spec, return a function that can be called on a path to find all matching files in it.", "type": "function", "file_path": "virtualenv\\src\\virtualenv\\discovery\\builtin.py", "ast_data": "FunctionDef name:path_exe_finder arguments arg:spec type:PythonSpec Assign Call call:generate_re Assign If Compare op:Eq Assign FunctionDef name:path_exes arguments arg:path type:Path Assign If Call call:exists For Call call:iterdir Assign Call call:fullmatch If Return return:yes" }, { "library": "seaborn", "name": "get_mapping", "source_code": "def get_mapping(self, scale: Scale, data: Series) -> Mapping: if isinstance(scale, Nominal): return self._get_nominal_mapping(scale, data) elif isinstance(scale, Boolean): return self._get_boolean_mapping(scale, data) if scale.values is None: vmin, vmax = self._forward(self.default_range) elif isinstance(scale.values, tuple) and len(scale.values) == 2: vmin, vmax = self._forward(scale.values) else: if isinstance(scale.values, tuple): actual = f'{len(scale.values)}-tuple' else: actual = str(type(scale.values)) scale_class = scale.__class__.__name__ err = ' '.join([f'Values for {self.variable} variables with {scale_class} scale', f'must be 2-tuple; not {actual}.']) raise TypeError(err) def mapping(x): return self._inverse(np.multiply(x, vmax - vmin) + vmin) return mapping", "docstring": "Return a function that maps from data domain to property range.", "type": "method", "file_path": "seaborn\\seaborn\\_core\\properties.py", "ast_data": "FunctionDef name:get_mapping arguments arg:self arg:scale type:Scale arg:data type:Series If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Compare op:Is Assign Call call:_forward If BoolOp Call call:isinstance Compare op:Eq Assign Call call:_forward If Call call:isinstance Assign Assign Call call:str Assign Assign Call call:join Raise raises:TypeError(err) FunctionDef name:mapping arguments arg:x Return return:yes Return return:yes" }, { "library": "mongo", "name": "partial_result", "source_code": "@property def partial_result(self) -> Optional[ClientBulkWriteResult]: from pymongo.results import ClientBulkWriteResult if self.details.get('anySuccessful'): return ClientBulkWriteResult(self.details, acknowledged = True, has_verbose_results = self.verbose) return None", "docstring": "The results of any successful operations that were performed before the error was encountered.", "type": "method", "file_path": "mongo\\pymongo\\errors.py", "ast_data": "FunctionDef name:partial_result arguments arg:self If Call call:get Return return:yes Return return:yes" }, { "library": "pytorch", "name": "Sigmoid", "source_code": "class Sigmoid(torch.nn.Sigmoid): def __init__(self, output_scale: float, output_zero_point: int): super().__init__()
self.output_scale = output_scale self.output_zero_point = output_zero_point def forward(self, input): return torch.ops.quantized.sigmoid(input, self.output_scale, self.output_zero_point) @classmethod def from_float(cls, mod, use_precomputed_fake_quant = False): output_scale, output_zero_point = mod.activation_post_process.calculate_qparams() return cls(float(output_scale), int(output_zero_point))", - "docstring": "This is the quantized equivalent of :class:. Args: scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor", - "type": "class", - "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\activation.py", - "ast_data": "ClassDef name:Sigmoid FunctionDef name:__init__ arguments arg:self arg:output_scale type:float arg:output_zero_point type:int Assign Assign FunctionDef name:forward arguments arg:self arg:input Return return:yes FunctionDef name:from_float arguments arg:cls arg:mod arg:use_precomputed_fake_quant Assign Call call:calculate_qparams Return return:yes" - }, - { - "library": "tensorflow", - "name": "DeadlineExceededError", - "source_code": "@tf_export('errors.DeadlineExceededError') class DeadlineExceededError(OpError): def __init__(self, node_def, op, message, *args): super(DeadlineExceededError, self).__init__(node_def, op, message, DEADLINE_EXCEEDED, *args)", - "docstring": "Raised when a deadline expires before an operation could complete. This exception is not currently used.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", - "ast_data": "ClassDef name:DeadlineExceededError Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:node_def arg:op arg:message vararg:args" - }, - { - "library": "django", - "name": "deactivate", - "source_code": "def deactivate(): if hasattr(_active, 'value'): del _active.value", - "docstring": "Uninstall the active translation object so that further _() calls resolve to the default translation object.", - "type": "function", - "file_path": "django\\django\\utils\\translation\\trans_real.py", - "ast_data": "FunctionDef name:deactivate arguments If Call call:hasattr" - }, - { - "library": "cryptography", - "name": "public_bytes_raw", - "source_code": "@abc.abstractmethod def public_bytes_raw(self) -> bytes: pass", - "docstring": "The raw bytes of the public key. Equivalent to public_bytes(Raw, Raw).", - "type": "method", - "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed25519.py", - "ast_data": "FunctionDef name:public_bytes_raw arguments arg:self" - }, - { - "library": "django", - "name": "savepoint_rollback", - "source_code": "@async_unsafe def savepoint_rollback(self, sid): if not self._savepoint_allowed(): return self.validate_thread_sharing() self._savepoint_rollback(sid) self.run_on_commit = [(sids, func, robust) for sids, func, robust in self.run_on_commit if sid not in sids]", - "docstring": "Roll back to a savepoint. 
Do nothing if savepoints are not supported.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:savepoint_rollback arguments arg:self arg:sid If Return return:no Assign" - }, - { - "library": "tensorflow", - "name": "get_next", - "source_code": "def get_next(self, device = None): if device is not None: index = self._devices.index(device) return self._device_iterators[index].get_next() result = [] for i, device in enumerate(self._devices): with ops.device(device): result.append(self._device_iterators[i].get_next()) return result", - "docstring": "Returns the next element given a , else returns all in a list.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py", - "ast_data": "FunctionDef name:get_next arguments arg:self arg:device If Compare op:IsNot Assign Call call:index Return return:yes Assign For Call call:enumerate With Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, name: Union[str, bytes], bound_context: context.Context, function_type: function_type_lib.FunctionType, children: Optional[List['AtomicFunction']] = None, call_options: CallOptions = CallOptions(), cached_graph: Optional[func_graph_module.FuncGraph] = None): self._name = compat.as_bytes(name) self._bound_context = bound_context self._function_type = function_type self._children = children if children else [] self._call_options = call_options self._cached_definition = None self._cached_graph = cached_graph self._generated_graph = None ref_key = (self._bound_context.function_scope_id, self.name) if ref_key not in RUNTIME_FUNCTION_REFS: RUNTIME_FUNCTION_REFS[ref_key] = 1 else: RUNTIME_FUNCTION_REFS[ref_key] + = 1", - "docstring": "Construct a new AtomicFunction. Args: name: str/bytes name of the runtime function in the bound context. bound_context: interface to the runtime for the AtomicFunction. function_type: input/output contract for the AtomicFunction children: list of AtomicFunctions that are needed to call this one. call_options: extra configuration options for the call. cached_graph: FuncGraph that this AtomicFunction was generated from (if known). Otherwise it will lazily construct a new corresponding FuncGraph if ever needed.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\atomic_function.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:name type:Union[str, bytes] arg:bound_context type:context.Context arg:function_type type:function_type_lib.FunctionType arg:children type:Optional[List['AtomicFunction']] arg:call_options type:CallOptions arg:cached_graph type:Optional[func_graph_module.FuncGraph] Assign Call call:as_bytes Assign Assign Assign Assign Assign Assign Assign Assign If Compare op:NotIn Assign" - }, - { - "library": "django", - "name": "ManagementForm", - "source_code": "class ManagementForm(Form): TOTAL_FORMS = IntegerField(widget = HiddenInput) INITIAL_FORMS = IntegerField(widget = HiddenInput) MIN_NUM_FORMS = IntegerField(required = False, widget = HiddenInput) MAX_NUM_FORMS = IntegerField(required = False, widget = HiddenInput) def clean(self): cleaned_data = super().clean() cleaned_data.setdefault(TOTAL_FORM_COUNT, 0) cleaned_data.setdefault(INITIAL_FORM_COUNT, 0) return cleaned_data", - "docstring": "Keep track of how many form instances are displayed on the page. 
If adding new forms via JavaScript, you should increment the count field of this form as well.", - "type": "class", - "file_path": "django\\django\\forms\\formsets.py", - "ast_data": "ClassDef name:ManagementForm Assign Call call:IntegerField Assign Call call:IntegerField Assign Call call:IntegerField Assign Call call:IntegerField FunctionDef name:clean arguments arg:self Assign Call call:clean Return return:yes" - }, - { - "library": "salmon", - "name": "render", - "source_code": "def render(variables, template): return load(template).render(variables)", - "docstring": "Takes the variables given and renders the template for you. Assumes the template returned by load() will have a .render() method that takes the variables as a dict. Use this if you just want to render a single template and don't want it to be a message. Use render_message if the contents of the template are to be interpreted as a message with headers and a body.", - "type": "function", - "file_path": "salmon\\salmon\\view.py", - "ast_data": "FunctionDef name:render arguments arg:variables arg:template Return return:yes" - }, - { - "library": "algorithms", - "name": "min_distance", - "source_code": "def min_distance(self, dist, min_dist_set): min_dist = float('inf') for target in range(self.vertex_count): if min_dist_set[target]: continue if dist[target] < min_dist: min_dist = dist[target] min_index = target return min_index", - "docstring": "Find the vertex that is closest to the visited set", - "type": "method", - "file_path": "algorithms\\algorithms\\graph\\dijkstra.py", - "ast_data": "FunctionDef name:min_distance arguments arg:self arg:dist arg:min_dist_set Assign Call call:float For Call call:range If If Compare op:Lt Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "FixedFormatter", - "source_code": "class FixedFormatter(Formatter): def __init__(self, seq): self.seq = seq self.offset_string = '' def __call__(self, x, pos = None): if pos is None or pos > = len(self.seq): return '' else: return self.seq[pos] def get_offset(self): return self.offset_string def set_offset_string(self, ofs): self.offset_string = ofs", - "docstring": "Return fixed strings for tick labels based only on position, not value. .. note:: should only be used together with . 
Otherwise, the labels may end up in unexpected positions.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "ClassDef name:FixedFormatter FunctionDef name:__init__ arguments arg:self arg:seq Assign Assign FunctionDef name:__call__ arguments arg:self arg:x arg:pos If BoolOp Compare op:Is Compare op:GtE Return return:yes Return return:yes FunctionDef name:get_offset arguments arg:self Return return:yes FunctionDef name:set_offset_string arguments arg:self arg:ofs Assign" - }, - { - "library": "mongo", - "name": "set_conn_timeout", - "source_code": "def set_conn_timeout(self, timeout: Optional[float]) -> None: if timeout = = self.last_timeout: return self.last_timeout = timeout self.conn.get_conn.settimeout(timeout)", - "docstring": "Cache last timeout to avoid duplicate calls to conn.settimeout.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\pool.py", - "ast_data": "FunctionDef name:set_conn_timeout arguments arg:self arg:timeout type:Optional[float] If Compare op:Eq Return return:no Assign" - }, - { - "library": "django", - "name": "check_const_string", - "source_code": "def check_const_string(result, func, cargs, offset = None, cpl = False): if offset: check_err(result, cpl = cpl) ptr = ptr_byref(cargs, offset) return ptr.value else: return result", - "docstring": "Similar functionality to , but does not free the pointer.", - "type": "function", - "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py", - "ast_data": "FunctionDef name:check_const_string arguments arg:result arg:func arg:cargs arg:offset arg:cpl If Assign Call call:ptr_byref Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "snapshot", - "source_code": "def snapshot() -> dict[str, Any]: return torch._C._mtia_memorySnapshot()", - "docstring": "Return a dictionary of MTIA memory allocator history", - "type": "function", - "file_path": "pytorch\\torch\\mtia\\__init__.py", - "ast_data": "FunctionDef name:snapshot arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "hash_tensor", - "source_code": "def hash_tensor(t: torch.Tensor) -> torch.Tensor: return t.detach().float().mean()", - "docstring": "Some inexpensive hash. 
Used as a quick and dirty indicator for tensor mutation", - "type": "function", - "file_path": "pytorch\\torch\\_library\\utils.py", - "ast_data": "FunctionDef name:hash_tensor arguments arg:t type:torch.Tensor Return return:yes" - }, - { - "library": "pytorch", - "name": "current_blas_handle", - "source_code": "def current_blas_handle(): _lazy_init() return torch._C._cuda_getCurrentBlasHandle()", - "docstring": "Return cublasHandle_t pointer to current cuBLAS handle", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\__init__.py", - "ast_data": "FunctionDef name:current_blas_handle arguments Return return:yes" - }, - { - "library": "django", - "name": "SQLiteNumericMixin", - "source_code": "class SQLiteNumericMixin: def as_sqlite(self, compiler, connection, **extra_context): sql, params = self.as_sql(compiler, connection, **extra_context) try: if self.output_field.get_internal_type() = = 'DecimalField': sql = '(CAST(%s AS NUMERIC))' % sql except FieldError: pass return (sql, params)", - "docstring": "Some expressions with output_field=DecimalField() must be cast to numeric to be properly filtered.", - "type": "class", - "file_path": "django\\django\\db\\models\\expressions.py", - "ast_data": "ClassDef name:SQLiteNumericMixin FunctionDef name:as_sqlite arguments arg:self arg:compiler arg:connection kwarg:extra_context Assign Call call:as_sql Try If Compare op:Eq Assign ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "forward", - "source_code": "def forward(self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: return (self.query_proj(query), self.key_proj(key), self.value_proj(value))", - "docstring": "Projects the input sequences using in-proj layers. Args: query, key, value (Tensors): sequence to be projected Shape: - query, key, value: :math: - Output: :math: where S is the sequence length, N is the batch size, and E is the embedding dimension.", - "type": "method", - "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:query type:torch.Tensor arg:key type:torch.Tensor arg:value type:torch.Tensor Return return:yes" - }, - { - "library": "pytorch", - "name": "Softsign", - "source_code": "class Softsign(Module): def forward(self, input: Tensor) -> Tensor: return F.softsign(input)", - "docstring": "Applies the element-wise Softsign function. .. math:: \\text{SoftSign}(x) = \\frac{x}{ 1 + |x|} Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. 
image:: ../scripts/activation_images/Softsign.png Examples:: >>> m = nn.Softsign() >>> input = torch.randn(2) >>> output = m(input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\activation.py", - "ast_data": "ClassDef name:Softsign FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "numpy", - "name": "mean", - "source_code": "def mean(self, axis = None, dtype = None, out = None, keepdims = np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} if self._mask is nomask: result = super().mean(axis = axis, dtype = dtype, **kwargs)[()] else: is_float16_result = False if dtype is None: if issubclass(self.dtype.type, (ntypes.integer, ntypes.bool)): dtype = mu.dtype('f8') elif issubclass(self.dtype.type, ntypes.float16): dtype = mu.dtype('f4') is_float16_result = True dsum = self.sum(axis = axis, dtype = dtype, **kwargs) cnt = self.count(axis = axis, **kwargs) if cnt.shape = = () and cnt = = 0: result = masked elif is_float16_result: result = self.dtype.type(dsum * 1.0 / cnt) else: result = dsum * 1.0 / cnt if out is not None: out.flat = result if isinstance(out, MaskedArray): outmask = getmask(out) if outmask is nomask: outmask = out._mask = make_mask_none(out.shape) outmask.flat = getmask(result) return out return result", - "docstring": "Returns the average of the array elements along given axis. Masked entries are ignored, and result elements which are not finite will be masked. Refer to for full documentation. See Also -------- numpy.ndarray.mean : corresponding function for ndarrays numpy.mean : Equivalent function numpy.ma.average : Weighted average. Examples -------- >>> import numpy as np >>> a = np.ma.array([1,2,3], mask=[False, False, True]) >>> a masked_array(data=[1, 2, --], mask=[False, False, True], fill_value=999999) >>> a.mean() 1.5", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:mean arguments arg:self arg:axis arg:dtype arg:out arg:keepdims Assign If Compare op:Is Assign Assign If Compare op:Is If Call call:issubclass Assign Call call:dtype If Call call:issubclass Assign Call call:dtype Assign Assign Call call:sum Assign Call call:count If BoolOp Compare op:Eq Compare op:Eq Assign If Assign Call call:type Assign If Compare op:IsNot Assign If Call call:isinstance Assign Call call:getmask If Compare op:Is Assign Call call:make_mask_none Assign Call call:getmask Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "all_gather_dtensor", - "source_code": "@abstractmethod def all_gather_dtensor(self, tensor: DTensor, parent_mesh: Optional[DeviceMesh]) -> torch.Tensor: ...", - "docstring": "This is to be called before loading a *sharded* DTensor state dict. This gathers tensor in FSDP dimension and returns local tensor of TP DTensor.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\_fsdp_extensions.py", - "ast_data": "FunctionDef name:all_gather_dtensor arguments arg:self arg:tensor type:DTensor arg:parent_mesh type:Optional[DeviceMesh]" - }, - { - "library": "pytorch", - "name": "add_graph", - "source_code": "def add_graph(self, model, input_to_model = None, verbose = False, use_strict_trace = True): torch._C._log_api_usage_once('tensorboard.logging.add_graph') self._get_file_writer().add_graph(graph(model, input_to_model, verbose, use_strict_trace))", - "docstring": "Add graph data to summary. Args: model (torch.nn.Module): Model to draw. 
input_to_model (torch.Tensor or list of torch.Tensor): A variable or a tuple of variables to be fed. verbose (bool): Whether to print graph structure in console. use_strict_trace (bool): Whether to pass keyword argument to . Pass False when you want the tracer to record your mutable container types (list, dict)", - "type": "method", - "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", - "ast_data": "FunctionDef name:add_graph arguments arg:self arg:model arg:input_to_model arg:verbose arg:use_strict_trace" - }, - { - "library": "pytorch", - "name": "Tanh", - "source_code": "class Tanh(Module): def forward(self, input: Tensor) -> Tensor: return torch.tanh(input)", - "docstring": "Applies the Hyperbolic Tangent (Tanh) function element-wise. Tanh is defined as: .. math:: \\text{Tanh}(x) = \\tanh(x) = \\frac{\\exp(x) - \\exp(-x)} {\\exp(x) + \\exp(-x)} Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Tanh.png Examples:: >>> m = nn.Tanh() >>> input = torch.randn(2) >>> output = m(input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\activation.py", - "ast_data": "ClassDef name:Tanh FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "cherrypy", - "name": "after", - "source_code": "@classmethod def after(cls, elapsed): return cls(datetime.datetime.now(datetime.timezone.utc) + elapsed)", - "docstring": "Return a timer that will expire after passes.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\locking.py", - "ast_data": "FunctionDef name:after arguments arg:cls arg:elapsed Return return:yes" - }, - { - "library": "django", - "name": "has_permission", - "source_code": "def has_permission(self): perms = self.get_permission_required() return self.request.user.has_perms(perms)", - "docstring": "Override this method to customize the way permissions are checked.", - "type": "method", - "file_path": "django\\django\\contrib\\auth\\mixins.py", - "ast_data": "FunctionDef name:has_permission arguments arg:self Assign Call call:get_permission_required Return return:yes" - }, - { - "library": "pytorch", - "name": "expires", - "source_code": "@contextmanager def expires(after: float, scope: Optional[str] = None, client: Optional[TimerClient] = None): if client is None: if _timer_client is None: raise RuntimeError('Configure timer client before using countdown timers.') client = _timer_client if scope is None: caller = getframeinfo(stack()[1][0]) scope = f'{caller.filename}#{caller.lineno}' expiration = time.time() + after client.acquire(scope, expiration) try: yield finally: client.release(scope)", - "docstring": "Acquires a countdown timer that expires in `` that the client talks to will ultimately make the decision when and how to reap the workers with expired timers. 
Usage:: torch.distributed.elastic.timer.configure(LocalTimerClient()) with expires(after=10): torch.distributed.all_reduce(...)", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\elastic\\timer\\api.py", - "ast_data": "FunctionDef name:expires arguments arg:after type:float arg:scope type:Optional[str] arg:client type:Optional[TimerClient] If Compare op:Is If Compare op:Is Raise raises:RuntimeError('Configure timer client before using countdown timers.') Assign If Compare op:Is Assign Call call:getframeinfo Assign Assign Try" - }, - { - "library": "pytorch", - "name": "cache", - "source_code": "def cache(self, src: str, globals: dict[str, Any], co_fields = None): key = self._get_key() if co_fields: key + = f' from {co_fields['co_filename']}: {co_fields['co_firstlineno']} in {co_fields['co_name']}' self.eval_cache[key] = src globals_copy = globals.copy() globals_copy['__file__'] = key globals_copy['__name__'] = key globals_copy['__loader__'] = self linecache.lazycache(key, globals_copy) return key", - "docstring": "Store the source in a private cache, and add a lazy entry in linecache that allows the source to be retrieved by 'filename'. Args: src (str): The module source to cache globals (dict): The module globals Returns: str: The cache key (and dummy filename) generated for src.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\graph_module.py", - "ast_data": "FunctionDef name:cache arguments arg:self arg:src type:str arg:globals type:dict[str, Any] arg:co_fields Assign Call call:_get_key If Assign Assign Call call:copy Assign Assign Assign Return return:yes" - }, - { - "library": "numpy", - "name": "accumulate", - "source_code": "def accumulate(self, target, axis = 0): tclass = get_masked_subclass(target) t = filled(target, self.filly) result = self.f.accumulate(t, axis) masked_result = result.view(tclass) return masked_result", - "docstring": "Accumulate along after filling with y fill value.", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:accumulate arguments arg:self arg:target arg:axis Assign Call call:get_masked_subclass Assign Call call:filled Assign Call call:accumulate Assign Call call:view Return return:yes" - }, - { - "library": "scrapy", - "name": "send_data", - "source_code": "def send_data(self) -> None: if self.metadata['stream_closed_local']: raise StreamClosedError(self.stream_id) window_size = self._protocol.conn.local_flow_control_window(stream_id = self.stream_id) max_frame_size = self._protocol.conn.max_outbound_frame_size bytes_to_send_size = min(window_size, self.metadata['remaining_content_length']) while bytes_to_send_size > 0: chunk_size = min(bytes_to_send_size, max_frame_size) data_chunk_start_id = self.metadata['request_content_length'] - self.metadata['remaining_content_length'] data_chunk = self._request.body[data_chunk_start_id: data_chunk_start_id + chunk_size] self._protocol.conn.send_data(self.stream_id, data_chunk, end_stream = False) bytes_to_send_size - = chunk_size self.metadata['remaining_content_length'] - = chunk_size self.metadata['remaining_content_length'] = max(0, self.metadata['remaining_content_length']) if self.metadata['remaining_content_length'] = = 0: self._protocol.conn.end_stream(self.stream_id)", - "docstring": "Called immediately after the headers are sent. Here we send all the data as part of the request. If the content length is 0 initially then we end the stream immediately and wait for response data. 
Warning: Only call this method when stream not closed from client side and has initiated request already by sending HEADER frame. If not then stream will raise ProtocolError (raise by h2 state machine).", "type": "method", "file_path": "scrapy\\scrapy\\core\\http2\\stream.py", "ast_data": "FunctionDef name:send_data arguments arg:self If Raise raises:StreamClosedError(self.stream_id) Assign Call call:local_flow_control_window Assign Assign Call call:min While Compare op:Gt Assign Call call:min Assign Assign Assign Call call:max If Compare op:Eq" }, { "library": "sphinx", "name": "merge_other", "source_code": "def merge_other(self, app: Sphinx, env: BuildEnvironment, docnames: Set[str], other: BuildEnvironment) -> None: raise NotImplementedError", "docstring": "Merge in specified data regarding docnames from a different object which coming from a subprocess in parallel builds. .. seealso:: :event:", "type": "method", "file_path": "sphinx\\sphinx\\environment\\collectors\\__init__.py", "ast_data": "FunctionDef name:merge_other arguments arg:self arg:app type:Sphinx arg:env type:BuildEnvironment arg:docnames type:Set[str] arg:other type:BuildEnvironment Raise raises:NotImplementedError" }, { "library": "tensorflow", "name": "is_simple_variable", "source_code": "def is_simple_variable(self) -> bool: attributes = self.object_proto.attributes return len(attributes) == 1 and attributes[0].name == constants.VARIABLE_VALUE_KEY and (not self.object_proto.children)", "docstring": "Determine whether this value is restorable with a Tensor initializer.", "type": "method", "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\restore.py", "ast_data": "FunctionDef name:is_simple_variable arguments arg:self Assign Return return:yes" }, { "library": "flexx", "name": "encode_type_id", "source_code": "def encode_type_id(b, ext_id): if ext_id is not None: bb = ext_id.encode('UTF-8') return b.upper() + lencode(len(bb)) + bb else: return b", "docstring": "Encode the type identifier, with or without extension id.", "type": "function", "file_path": "flexx\\flexx\\app\\bsdf_lite.py", "ast_data": "FunctionDef name:encode_type_id arguments arg:b arg:ext_id If Compare op:IsNot Assign Call call:encode Return return:yes Return return:yes" }, { "library": "django", "name": "DblFromGeom", "source_code": "class DblFromGeom(GEOSFuncFactory): restype = c_int errcheck = staticmethod(check_dbl)", "docstring": "Argument is a Geometry, return type is double that is passed in by reference as the last argument.", "type": "class", "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\misc.py", "ast_data": "ClassDef name:DblFromGeom Assign Assign Call call:staticmethod" }, { "library": "pytorch", "name": "DefaultState", "source_code": "class DefaultState: __slots__ = ['process_group', 'world_size', 'gradient_predivide_factor', 'gradient_postdivide_factor'] def __init__(self, process_group: dist.ProcessGroup): if process_group is None: raise ValueError(f'Expected to pass in an explicit ProcessGroup to {self}.') self.process_group = process_group self.world_size = dist.get_world_size(process_group) self.gradient_predivide_factor = self._get_gradient_predivide_factor(self.world_size) self.gradient_postdivide_factor = self.world_size / self.gradient_predivide_factor @staticmethod def _get_gradient_predivide_factor(world_size: int) -> float: factor: int = 1 while world_size % factor == 0 and world_size / factor > factor: factor *= 2
return float(factor)", - "docstring": "Stores state needed to perform the default communication algorithm within a communication hook. Args: process_group (ProcessGroup): The process group to be used.", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\algorithms\\_comm_hooks\\default_hooks.py", - "ast_data": "ClassDef name:DefaultState Assign FunctionDef name:__init__ arguments arg:self arg:process_group type:dist.ProcessGroup If Compare op:Is Raise raises:ValueError(f'Expected to pass in an explicit ProcessGroup to {self}.') Assign Assign Call call:get_world_size Assign Call call:_get_gradient_predivide_factor Assign FunctionDef name:_get_gradient_predivide_factor arguments arg:world_size type:int While BoolOp Compare op:Eq Compare op:Gt Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict_proba", - "source_code": "def predict_proba(self, X): check_is_fitted(self) mean_proba = np.zeros((_num_samples(X), len(self.classes_))) for calibrated_classifier in self.calibrated_classifiers_: proba = calibrated_classifier.predict_proba(X) mean_proba + = proba mean_proba / = len(self.calibrated_classifiers_) return mean_proba", - "docstring": "Calibrated probabilities of classification. This function returns calibrated probabilities of classification according to each class on an array of test vectors X. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by . Returns ------- C : ndarray of shape (n_samples, n_classes) The predicted probas.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\calibration.py", - "ast_data": "FunctionDef name:predict_proba arguments arg:self arg:X Assign Call call:zeros For Assign Call call:predict_proba Return return:yes" - }, - { - "library": "scikit-learn", - "name": "is_python_scalar", - "source_code": "def is_python_scalar(x: object) -> TypeIs[complex]: return isinstance(x, int | float | complex) and (not is_numpy_array(x))", - "docstring": "Return True if is a Python scalar, False otherwise.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_utils\\_helpers.py", - "ast_data": "FunctionDef name:is_python_scalar arguments arg:x type:object Return return:yes" - }, - { - "library": "tensorflow", - "name": "check_with_golden", - "source_code": "def check_with_golden(filename): path_to_file = PATH_TO_DIR + '/data/' + filename if os.path.isfile(path_to_file) and os.path.isfile(CUDA_CC_GOLDEN_DIR): with open(path_to_file, 'r') as f_new: with open(CUDA_CC_GOLDEN_DIR, 'r') as f_golden: diff = difflib.unified_diff(f_new.readlines(), f_golden.readlines(), fromfile = path_to_file, tofile = CUDA_CC_GOLDEN_DIR) diff_list = [] for line in diff: diff_list.append(line) if diff_list: print('WARNING: difference(s) found between new csv and golden csv.') print(diff_list) else: print('No difference found between new csv and golen csv.')", - "docstring": "Checks the newly created CUDA compute capability file with the golden. If differences are found, then it prints a list of all mismatches as a . Golden file must reside in directory. 
Args: filename: String that is the name of the newly created file.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py", - "ast_data": "FunctionDef name:check_with_golden arguments arg:filename Assign If BoolOp Call call:isfile Call call:isfile With With Assign Call call:unified_diff Assign For If" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "def fit(self, K, y = None): xp, _ = get_namespace(K) K = validate_data(self, K, dtype = _array_api.supported_float_dtypes(xp)) if K.shape[0] ! = K.shape[1]: raise ValueError('Kernel matrix must be a square matrix. Input is a {}x{} matrix.'.format(K.shape[0], K.shape[1])) n_samples = K.shape[0] self.K_fit_rows_ = xp.sum(K, axis = 0) / n_samples self.K_fit_all_ = xp.sum(self.K_fit_rows_) / n_samples return self", - "docstring": "Fit KernelCenterer. Parameters ---------- K : ndarray of shape (n_samples, n_samples) Kernel matrix. y : None Ignored. Returns ------- self : object Returns the instance itself.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:K arg:y Assign Call call:get_namespace Assign Call call:validate_data If Compare op:NotEq Raise raises:ValueError('Kernel matrix must be a square matrix. Input is a {}x{} matrix.'.format(K.shape[0], K.shape[1])) Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_function", - "source_code": "def get_function(name, entries): contents = '\\nabsl: : optional> {name}(\\n const tensorflow: : string &op_name) {{\\n static std: : array a = {{{{\\n'.format(name = name, count = len(entries) + 1) contents + = ' ' contents + = '\\n '.join((entries[op_type] for op_type in sorted(entries))) contents + = '\\n {\"VarHandleOp\"}, ' contents + = '\\n }};\\n static const auto &m = *OpGradientInfoInit(a);\\n\\n auto it = m.find(op_name);\\n if (it ! = m.end()) {\\n return it->second;\\n }\\n return absl: : nullopt;\\n}\\n' return contents", - "docstring": "Generates lookup function with given name and lookup table entries.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\gradient_input_output_exclusions.py", - "ast_data": "FunctionDef name:get_function arguments arg:name arg:entries Assign Call call:format Return return:yes" - }, - { - "library": "tensorflow", - "name": "do_not_descend_map", - "source_code": "@property def do_not_descend_map(self): return self._do_not_descend_map", - "docstring": "A map from parents to symbols that should not be descended into. This map can be edited, but it should not be edited once traversal has begun. 
Returns: The map marking symbols to not explore.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\tools\\common\\public_api.py", - "ast_data": "FunctionDef name:do_not_descend_map arguments arg:self Return return:yes" - }, - { - "library": "virtualenv", - "name": "VirtualenvBuiltin", - "source_code": "class VirtualenvBuiltin(Creator, Describe, ABC): def __init__(self, options, interpreter) -> None: Creator.__init__(self, options, interpreter) Describe.__init__(self, self.dest, interpreter)", - "docstring": "A creator that does operations itself without delegation, if we can create it we can also describe it.", - "type": "class", - "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\builtin_way.py", - "ast_data": "ClassDef name:VirtualenvBuiltin FunctionDef name:__init__ arguments arg:self arg:options arg:interpreter" - }, - { - "library": "pytorch", - "name": "MSELoss", - "source_code": "class MSELoss(_Loss): __constants__ = ['reduction'] def __init__(self, size_average = None, reduce = None, reduction: str = 'mean') -> None: super().__init__(size_average, reduce, reduction) def forward(self, input: Tensor, target: Tensor) -> Tensor: return F.mse_loss(input, target, reduction = self.reduction)", - "docstring": "Creates a criterion that measures the mean squared error (squared L2 norm) between each element in the input :math: and target :math:. The unreduced (i.e. with :attr: set to `Nreductionmean';}\\\\ \\operatorname{sum}(L), & \\text{if reduction} = \\text{xyNNNreductionsize_averagereducereductionsize_averagereducesize_averagesize_averagereducereduction(*)*(*)`, same shape as the input. Examples: >>> loss = nn.MSELoss() >>> input = torch.randn(3, 5, requires_grad=True) >>> target = torch.randn(3, 5) >>> output = loss(input, target) >>> output.backward()", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\loss.py", - "ast_data": "ClassDef name:MSELoss Assign FunctionDef name:__init__ arguments arg:self arg:size_average arg:reduce arg:reduction type:str FunctionDef name:forward arguments arg:self arg:input type:Tensor arg:target type:Tensor Return return:yes" - }, - { - "library": "kornia", - "name": "from_config", - "source_code": "@staticmethod @abstractmethod def from_config(config: ModelConfig) -> ModelBase[ModelConfig]: raise NotImplementedError", - "docstring": "Build/load the model. 
Args: config: The specifications for the model be build/loaded", - "type": "method", - "file_path": "kornia\\kornia\\contrib\\models\\base.py", - "ast_data": "FunctionDef name:from_config arguments arg:config type:ModelConfig Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "embedding_tables", - "source_code": "@property def embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]: self._maybe_build() return {stacked_table_name: self._variables[stacked_table_name]['parameters'] for stacked_table_name in self._stacked_table_to_tables}", - "docstring": "Returns a dict of embedding tables, keyed by .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3.py", - "ast_data": "FunctionDef name:embedding_tables arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "escape_arg", - "source_code": "def escape_arg(self, s: str) -> str: s = self.escape(s) s = s.replace(', ', '@comma{}') s = ' '.join(s.split()).strip() return s", - "docstring": "Return an escaped string suitable for use as an argument to a Texinfo command.", - "type": "method", - "file_path": "sphinx\\sphinx\\writers\\texinfo.py", - "ast_data": "FunctionDef name:escape_arg arguments arg:self arg:s type:str Assign Call call:escape Assign Call call:replace Assign Call call:strip Return return:yes" - }, - { - "library": "numpy", - "name": "make_hg_version_py", - "source_code": "def make_hg_version_py(self, delete = True): target = njoin(self.local_path, '__hg_version__.py') revision = self._get_hg_revision(self.local_path) if os.path.isfile(target) or revision is None: return else: def generate_hg_version_py(): if not os.path.isfile(target): version = str(revision) self.info('Creating %s (version = %r)' % (target, version)) with open(target, 'w') as f: f.write('version = %r\\n' % version) def rm_file(f = target, p = self.info): if delete: try: os.remove(f) p('removed ' + f) except OSError: pass try: os.remove(f + 'c') p('removed ' + f + 'c') except OSError: pass atexit.register(rm_file) return target self.add_data_files(('', generate_hg_version_py()))", - "docstring": "Appends a data function to the data_files list that will generate __hg_version__.py file to the current package directory. Generate package __hg_version__.py file from Mercurial revision, it will be removed after python exits but will be available when sdist, etc commands are executed. Notes ----- If __hg_version__.py existed before, nothing is done. This is intended for working with source directories that are in an Mercurial repository.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\misc_util.py", - "ast_data": "FunctionDef name:make_hg_version_py arguments arg:self arg:delete Assign Call call:njoin Assign Call call:_get_hg_revision If BoolOp Call call:isfile Compare op:Is Return return:no FunctionDef name:generate_hg_version_py arguments If Assign Call call:str With FunctionDef name:rm_file arguments arg:f arg:p If Try ExceptHandler Try ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "display_snapshot", - "source_code": "def display_snapshot(self, type: str = 'current', units: str = 'B', tabulate: bool = False) -> None: snapshot = self.get_tracker_snapshot(type) if tabulate: _print_snapshot_tabular(snapshot, units) else: _print_snapshot(snapshot, units)", - "docstring": "Display the memory usage breakdown snapshot of the tracker based on the specified type and units. 
Keyword args: type (str): The type of snapshot to display. Can be \"current\" for the current memory usage or \"peak\" for the peak memory usage. Defaults to \"current\". units (str): The units to use for displaying memory usage. Defaults to \"B\". Supports [\"B\", \"KiB\", \"MiB\", \"GiB\"]. tabulate (bool): Whether to display the snapshot in a tabular format. Defaults to False.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py", - "ast_data": "FunctionDef name:display_snapshot arguments arg:self arg:type type:str arg:units type:str arg:tabulate type:bool Assign Call call:get_tracker_snapshot If" - }, - { - "library": "scipy", - "name": "set_jac_params", - "source_code": "def set_jac_params(self, *args): self.jac_params = args return self", - "docstring": "Set extra parameters for user-supplied function jac.", - "type": "method", - "file_path": "scipy\\scipy\\integrate\\_ode.py", - "ast_data": "FunctionDef name:set_jac_params arguments arg:self vararg:args Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "flatten_args_detach", - "source_code": "def flatten_args_detach(args): flat_detached_args = [] def extract_tensor_args(a): nonlocal flat_detached_args if isinstance(a, torch.Tensor): val = a.detach().requires_grad_(a.requires_grad) flat_detached_args.append(val) return val else: flat_detached_args.append(a) return a new_args = fx.node.map_aggregate(args, extract_tensor_args) return (new_args, flat_detached_args)", - "docstring": "Flatten the args into a list form and detach the tensors from computational graph.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\pipelining\\_utils.py", - "ast_data": "FunctionDef name:flatten_args_detach arguments arg:args Assign FunctionDef name:extract_tensor_args arguments arg:a If Call call:isinstance Assign Call call:requires_grad_ Return return:yes Return return:yes Assign Call call:map_aggregate Return return:yes" - }, - { - "library": "pytorch", - "name": "match_score", - "source_code": "@property def match_score(self) -> int | None: return self._matching_score", - "docstring": "The matching score of the OnnxSchemaChecker . If this remains None, it means the matching score has not been calculated, and it's not a nearest match candidate. Returns: The matching score of the OnnxSchemaChecker .", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py", - "ast_data": "FunctionDef name:match_score arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "collation", - "source_code": "def collation(self, collation: Optional[_CollationIn]) -> AsyncCursor[_DocumentType]: self._check_okay_to_chain() self._collation = validate_collation_or_none(collation) return self", - "docstring": "Adds a :class: to this query. Raises :exc: if is not an instance of :class: or a `~pymongo.errors.InvalidOperationAsyncCursor~pymongo.collation.Collation`.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\cursor.py", - "ast_data": "FunctionDef name:collation arguments arg:self arg:collation type:Optional[_CollationIn] Assign Call call:validate_collation_or_none Return return:yes" - }, - { - "library": "django", - "name": "get_user_permissions", - "source_code": "def get_user_permissions(self, obj = None): return _user_get_permissions(self, obj, 'user')", - "docstring": "Return a list of permission strings that this user has directly. Query all available auth backends. 
If an object is passed in, return only permissions matching this object.", - "type": "method", - "file_path": "django\\django\\contrib\\auth\\models.py", - "ast_data": "FunctionDef name:get_user_permissions arguments arg:self arg:obj Return return:yes" - }, - { - "library": "django", - "name": "execute_wrapper", - "source_code": "@contextmanager def execute_wrapper(self, wrapper): self.execute_wrappers.append(wrapper) try: yield finally: self.execute_wrappers.pop()", - "docstring": "Return a context manager under which the wrapper is applied to suitable database query executions.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:execute_wrapper arguments arg:self arg:wrapper Try" - }, - { - "library": "pandas", - "name": "components", - "source_code": "@property def components(self) -> DataFrame: return self._get_values().components.set_index(self._parent.index).__finalize__(self._parent)", - "docstring": "Return a Dataframe of the components of the Timedeltas. Each row of the DataFrame corresponds to a Timedelta in the original Series and contains the individual components (days, hours, minutes, seconds, milliseconds, microseconds, nanoseconds) of the Timedelta. Returns ------- DataFrame See Also -------- TimedeltaIndex.components : Return a DataFrame of the individual resolution components of the Timedeltas. Series.dt.total_seconds : Return the total number of seconds in the duration. Examples -------- >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit=\"s\")) >>> s 0 0 days 00:00:00 1 0 days 00:00:01 2 0 days 00:00:02 3 0 days 00:00:03 4 0 days 00:00:04 dtype: timedelta64[ns] >>> s.dt.components days hours minutes seconds milliseconds microseconds nanoseconds 0 0 0 0 0 0 0 0 1 0 0 0 1 0 0 0 2 0 0 0 2 0 0 0 3 0 0 0 3 0 0 0 4 0 0 0 4 0 0 0", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\accessors.py", - "ast_data": "FunctionDef name:components arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "custom_sharding_spec_op", - "source_code": "def custom_sharding_spec_op(sharding_spec_class, func): class_name = sharding_spec_class.__qualname__ if class_name not in _CUSTOM_SHARDING_SPEC_OPS: _CUSTOM_SHARDING_SPEC_OPS[class_name] = {} return functools.partial(_decorator_func, op = func, op_table = _CUSTOM_SHARDING_SPEC_OPS[class_name])", - "docstring": "Decorator to allow custom registration of ops. Args: sharding_spec_class(type): The ShardingSpec for which we need to add this custom op. func(Callable): The op to override (ex: torch.bmm)", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py", - "ast_data": "FunctionDef name:custom_sharding_spec_op arguments arg:sharding_spec_class arg:func Assign If Compare op:NotIn Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "build_wheel", - "source_code": "def build_wheel(dir_path: str, cwd: str, project_name: str, platform: str, collab: str = False) -> None: env = os.environ.copy() if is_windows(): env['HOMEPATH'] = 'C: ' env['project_name'] = project_name if collab = = 'True': env['collaborator_build'] = True subprocess.run([sys.executable, 'tensorflow/tools/pip_package/setup.py', 'bdist_wheel', f'--dist-dir = {dir_path}', f'--plat-name = {platform}'], check = True, cwd = cwd, env = env)", - "docstring": "Build the wheel in the target directory. 
Args: dir_path: directory where the wheel will be stored cwd: path to directory with wheel source files project_name: name to pass to setup.py. platform: platform name to pass to setup.py. collab: defines if this is a collab build", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\build_pip_package.py", - "ast_data": "FunctionDef name:build_wheel arguments arg:dir_path type:str arg:cwd type:str arg:project_name type:str arg:platform type:str arg:collab type:str Assign Call call:copy If Call call:is_windows Assign Assign If Compare op:Eq Assign" - }, - { - "library": "django", - "name": "migration_plan", - "source_code": "def migration_plan(self, targets, clean_start = False): plan = [] if clean_start: applied = {} else: applied = dict(self.loader.applied_migrations) for target in targets: if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] = = target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) elif self.loader.replace_migrations and target not in self.loader.graph.node_map: self.loader.replace_migrations = False self.loader.build_graph() return self.migration_plan(targets, clean_start = clean_start) elif target in applied: next_in_app = sorted((n for n in self.loader.graph.node_map[target].children if n[0] = = target[0])) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.pop(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied[migration] = self.loader.graph.nodes[migration] return plan", - "docstring": "Given a set of targets, return a list of (Migration instance, backwards?).", - "type": "method", - "file_path": "django\\django\\db\\migrations\\executor.py", - "ast_data": "FunctionDef name:migration_plan arguments arg:self arg:targets arg:clean_start Assign If Assign Assign Call call:dict For If Compare op:Is For Call call:root_nodes If Compare op:Eq For Call call:backwards_plan If Compare op:In If BoolOp Compare op:NotIn Assign Return return:yes If Compare op:In Assign Call call:sorted For For Call call:backwards_plan If Compare op:In For Call call:forwards_plan If Compare op:NotIn Assign Return return:yes" - }, - { - "library": "scipy", - "name": "LagrangianHessian", - "source_code": "class LagrangianHessian: def __init__(self, n, objective_hess, constraints_hess): self.n = n self.objective_hess = objective_hess self.constraints_hess = constraints_hess def __call__(self, x, v_eq, v_ineq = None): if v_ineq is None: v_ineq = np.empty(0) H_objective = self.objective_hess(x) H_constraints = self.constraints_hess(x, v_eq, v_ineq) def matvec(p): return H_objective.dot(p) + H_constraints.dot(p) return LinearOperator((self.n, self.n), matvec)", - "docstring": "The Hessian of the Lagrangian as LinearOperator. 
The Lagrangian is computed as the objective function plus all the constraints multiplied with some numbers (Lagrange multipliers).", - "type": "class", - "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\minimize_trustregion_constr.py", - "ast_data": "ClassDef name:LagrangianHessian FunctionDef name:__init__ arguments arg:self arg:n arg:objective_hess arg:constraints_hess Assign Assign Assign FunctionDef name:__call__ arguments arg:self arg:x arg:v_eq arg:v_ineq If Compare op:Is Assign Call call:empty Assign Call call:objective_hess Assign Call call:constraints_hess FunctionDef name:matvec arguments arg:p Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "roots", - "source_code": "def roots(self): k = self._data[5] if k = = 3: t = self._eval_args[0] mest = 3 * (len(t) - 7) with FITPACK_LOCK: return _fitpack_impl.sproot(self._eval_args, mest = mest) raise NotImplementedError('finding roots unsupported for non-cubic splines')", - "docstring": "Return the zeros of the spline. Notes ----- Restriction: only cubic splines are supported by FITPACK. For non-cubic splines, use (see below for an example). Examples -------- For some data, this method may miss a root. This happens when one of the spline knots (which FITPACK places automatically) happens to coincide with the true root. A workaround is to convert to , which uses a different root-finding algorithm. For example, >>> x = [1.96, 1.97, 1.98, 1.99, 2.00, 2.01, 2.02, 2.03, 2.04, 2.05] >>> y = [-6.365470e-03, -4.790580e-03, -3.204320e-03, -1.607270e-03, ... 4.440892e-16, 1.616930e-03, 3.243000e-03, 4.877670e-03, ... 6.520430e-03, 8.170770e-03] >>> from scipy.interpolate import UnivariateSpline >>> spl = UnivariateSpline(x, y, s=0) >>> spl.roots() array([], dtype=float64) Converting to a PPoly object does find the roots at : >>> from scipy.interpolate import splrep, PPoly >>> tck = splrep(x, y, s=0) >>> ppoly = PPoly.from_spline(tck) >>> ppoly.roots(extrapolate=False) array([2.]) See Also -------- sproot PPoly.roots", - "type": "method", - "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", - "ast_data": "FunctionDef name:roots arguments arg:self Assign If Compare op:Eq Assign Assign With Return return:yes Raise raises:NotImplementedError('finding roots unsupported for non-cubic splines')" - }, - { - "library": "django", - "name": "page_range", - "source_code": "@property def page_range(self): return range(1, self.num_pages + 1)", - "docstring": "Return a 1-based range of pages for iterating through within a template for loop.", - "type": "method", - "file_path": "django\\django\\core\\paginator.py", - "ast_data": "FunctionDef name:page_range arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "swap_tensor", - "source_code": "def swap_tensor(self, name: str, value: torch.Tensor, allow_missing: bool = False) -> torch.Tensor: prefix, _, attr = name.rpartition('.') return swap_tensor(self.get_submodule(prefix), attr, value, allow_missing = allow_missing)", - "docstring": "Swap the attribute specified by the given path to value. 
For example, to swap the attribute mod.layer1.conv1.weight, use accessor.swap_tensor(\"layer1.conv1.weight\", value)", - "type": "method", - "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py", - "ast_data": "FunctionDef name:swap_tensor arguments arg:self arg:name type:str arg:value type:torch.Tensor arg:allow_missing type:bool Assign Call call:rpartition Return return:yes" - }, - { - "library": "flexx", - "name": "publish", - "source_code": "def publish(self, name, token, url = None): d = self.dump('index.html', 2) f = io.BytesIO() with zipfile.ZipFile(f, 'w') as zf: for fname in d.keys(): zf.writestr(fname, d[fname]) try: import requests except ImportError: raise ImportError('App.publish() needs requests lib: pip install requests') url = url or 'http: //flexx.app/submit/{name}/{token}' real_url = url.format(name = name, token = token) r = requests.post(real_url, data = f.getvalue()) if r.status_code ! = 200: raise RuntimeError('Publish failed: ' + r.text) else: print('Publish succeeded, ' + r.text) if url.startswith('http: //flexx.app'): print('You app is now available at http: //flexx.app/open/%s/' % name)", - "docstring": "Publish this app as static HTML on the web. This is an experimental feature! We will try to keep your app published, but make no guarantees. We reserve the right to remove apps or shut down the web server completely. Arguments: name (str): The name by which to publish this app. Must be unique within the scope of the published site. token (str): a secret token. This is stored at the target website. Subsequent publications of the same app name must have the same token. url (str): The url to POST the app to. If None (default), the default Flexx live website url will be used.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_app.py", - "ast_data": "FunctionDef name:publish arguments arg:self arg:name arg:token arg:url Assign Call call:dump Assign Call call:BytesIO With For Call call:keys Try ExceptHandler Raise raises:ImportError('App.publish() needs requests lib: pip install requests') Assign BoolOp Assign Call call:format Assign Call call:post If Compare op:NotEq Raise raises:RuntimeError('Publish failed: ' + r.text) If Call call:startswith" - }, - { - "library": "tensorflow", - "name": "apply_aggregation_replica_context", - "source_code": "def apply_aggregation_replica_context(value, aggregation, destinations): if isinstance(value, DistributedValues): raise TypeError('Cannot use DistributedValues to update variables in replica context.') if not tensor_util.is_tf_type(value): return value if aggregation = = vs.VariableAggregation.ONLY_FIRST_REPLICA: def merge_fn(strategy, value): return strategy.extended.broadcast_to(strategy.experimental_local_results(value)[0], destinations = destinations) return distribute_lib.get_replica_context().merge_call(merge_fn, args = (value,)) else: reduce_op = reduce_util.ReduceOp.from_variable_aggregation(aggregation) aggregated_value = distribute_lib.get_strategy().extended._replica_ctx_all_reduce(reduce_op, value) return aggregated_value", - "docstring": "Aggregate to as specified by .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", - "ast_data": "FunctionDef name:apply_aggregation_replica_context arguments arg:value arg:aggregation arg:destinations If Call call:isinstance Raise raises:TypeError('Cannot use DistributedValues to update variables in replica context.') If Return return:yes If Compare op:Eq FunctionDef name:merge_fn arguments arg:strategy arg:value 
Return return:yes Return return:yes Assign Call call:from_variable_aggregation Assign Call call:_replica_ctx_all_reduce Return return:yes" - }, - { - "library": "tensorflow", - "name": "validate_saveables_for_saved_model", - "source_code": "def validate_saveables_for_saved_model(saveables, obj): if isinstance(obj, python_state.PythonState): logging.warn(f'Note that object {obj} stores python values into the checkpoint. These values will not be restored when loading the SavedModel into python.') return [] if any((isinstance(saveable, trackable.NoRestoreSaveable) for saveable in saveables)): return [] return saveables", - "docstring": "Makes sure SaveableObjects are compatible with SavedModel.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", - "ast_data": "FunctionDef name:validate_saveables_for_saved_model arguments arg:saveables arg:obj If Call call:isinstance Return return:yes If Call call:any Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "do_if", - "source_code": "@register.tag('if') def do_if(parser, token): bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists = [(condition, nodelist)] token = parser.next_token() while token.contents.startswith('elif'): bits = token.split_contents()[1:] condition = TemplateIfParser(parser, bits).parse() nodelist = parser.parse(('elif', 'else', 'endif')) conditions_nodelists.append((condition, nodelist)) token = parser.next_token() if token.contents = = 'else': nodelist = parser.parse(('endif',)) conditions_nodelists.append((None, nodelist)) token = parser.next_token() if token.contents ! = 'endif': raise TemplateSyntaxError('Malformed template tag at line {}: \"{}\"'.format(token.lineno, token.contents)) return IfNode(conditions_nodelists)", - "docstring": "Evaluate a variable, and if that variable is \"true\" (i.e., exists, is not empty, and is not a false boolean value), output the contents of the block: :: {% if athlete_list %} Number of athletes: {{ athlete_list|count }} {% elif athlete_in_locker_room_list %} Athletes should be out of the locker room soon! {% else %} No athletes. {% endif %} In the above, if ``. 
Operator precedence follows Python.", - "type": "function", - "file_path": "django\\django\\template\\defaulttags.py", - "ast_data": "FunctionDef name:do_if arguments arg:parser arg:token Call call:tag Assign Assign Call call:parse Assign Call call:parse Assign Assign Call call:next_token While Call call:startswith Assign Assign Call call:parse Assign Call call:parse Assign Call call:next_token If Compare op:Eq Assign Call call:parse Assign Call call:next_token If Compare op:NotEq Raise raises:TemplateSyntaxError('Malformed template tag at line {}: \"{}\"'.format(token.lineno, token.contents)) Return return:yes" - }, - { - "library": "pytorch", - "name": "do_partition", - "source_code": "def do_partition(self) -> GraphModule: module_with_submodules = split_module(self.graph_module, self.torch_module, lambda node: self.node_to_partition[node]) return module_with_submodules", - "docstring": "Return a new fx module with submodule nodes (partitions).", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", - "ast_data": "FunctionDef name:do_partition arguments arg:self Assign Call call:split_module Return return:yes" - }, - { - "library": "pytorch", - "name": "get_unique_failures", - "source_code": "def get_unique_failures(self, jobs: list[Any]) -> dict[str, list[Any]]: failures = defaultdict(list) for job in jobs: if job['conclusion'] = = 'failure': found_similar_failure = False if 'failureCaptures' not in job: failures['unclassified'] = [job] continue failureCaptures = ' '.join(job['failureCaptures']) for failure in failures: seq = SequenceMatcher(None, failureCaptures, failure) if seq.ratio() > SIMILARITY_THRESHOLD: failures[failure].append(job) found_similar_failure = True break if not found_similar_failure: failures[failureCaptures] = [job] return failures", - "docstring": "Returns list of jobs grouped by failureCaptures from the input list", - "type": "method", - "file_path": "pytorch\\tools\\alerts\\create_alerts.py", - "ast_data": "FunctionDef name:get_unique_failures arguments arg:self arg:jobs type:list[Any] Assign Call call:defaultdict For If Compare op:Eq Assign If Compare op:NotIn Assign Assign Call call:join For Assign Call call:SequenceMatcher If Compare op:Gt Assign If Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "HiddenTfApiAttribute", - "source_code": "class HiddenTfApiAttribute(property): def __init__(self, deprecation_message): def raise_error(unused_self): raise AttributeError(deprecation_message) super(HiddenTfApiAttribute, self).__init__(raise_error)", - "docstring": "Hides a class attribute from the public API. Attributes in public classes can be hidden from the API by having an '_' in front of the name (e.g. ClassName._variables). This doesn't work when attributes or methods are inherited from a parent class. 
To hide inherited attributes, set their values to be .", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\util\\deprecation.py", - "ast_data": "ClassDef name:HiddenTfApiAttribute FunctionDef name:__init__ arguments arg:self arg:deprecation_message FunctionDef name:raise_error arguments arg:unused_self Raise raises:AttributeError(deprecation_message)" - }, - { - "library": "tensorflow", - "name": "restore_variables", - "source_code": "def restore_variables(self, sess, saver, import_scope = None): with sess.graph.as_default(): if saver is None and (not variables._all_saveable_objects(scope = import_scope)): tf_logging.info('The specified SavedModel has no variables; no checkpoints were restored.') elif isinstance(saver, tf_saver.Saver): saver.restore(sess, self._variables_path) else: raise ValueError('No tf.train.Saver object was passed to the function `SavedModelLoader.restore_variables`. Since there are variables in the graph, a saver is required.')", - "docstring": "Restore SavedModel variable values into the session. Args: sess: tf.compat.v1.Session to restore variable values. saver: a tf.compat.v1.train.Saver object. Can be None if there are no variables in graph. This may be the saver returned by the load_graph() function, or a default . import_scope: Optional -- if specified, prepend this string followed by '/' to all loaded tensor names. This scope is applied to tensor instances loaded into the passed session, but it is *not* written through to the static protocol buffer that is returned. Raises: ValueError: if no saver was passed to the saver argument, and there are variables in the graph.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", - "ast_data": "FunctionDef name:restore_variables arguments arg:self arg:sess arg:saver arg:import_scope With If BoolOp Compare op:Is If Call call:isinstance Raise raises:ValueError('No tf.train.Saver object was passed to the function `SavedModelLoader.restore_variables`. Since there are variables in the graph, a saver is required.')" - }, - { - "library": "tensorflow", - "name": "toco_convert", - "source_code": "@_tf_export(v1 = ['lite.toco_convert']) @deprecation.deprecated(None, 'Use `lite.TFLiteConverter` instead.') def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs): return convert_graphdef(input_data, input_tensors, output_tensors, *args, **kwargs)", - "docstring": "Convert a TensorFlow GraphDef to TFLite. This function is deprecated. Please use API instead. Conversion can be customized by providing arguments that are forwarded to and (see documentation for details). Args: input_data: Input data (i.e. often ). input_tensors: List of input tensors. Type and shape are computed using and . output_tensors: List of output tensors (only .name is used from this). *args: See and . **kwargs: See and . Returns: The converted TensorFlow Lite model in a bytes array. 
Raises: Defined in .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py", - "ast_data": "FunctionDef name:toco_convert arguments arg:input_data arg:input_tensors arg:output_tensors vararg:args kwarg:kwargs Call call:_tf_export Call call:deprecated Return return:yes" - }, - { - "library": "pytorch", - "name": "load_spmm_dataset", - "source_code": "def load_spmm_dataset(dataset_path, hidden_size, sparsity, spmm_type, device, n_limit = math.inf): current_folder_path = f'{dataset_path}/{sparsity}' path = Path(current_folder_path) files = path.glob('**/*.smtx') print(dataset_path, hidden_size, sparsity) index = 0 x_files, y_files = ([], []) for f in files: if index >= n_limit: break print('.', end = '') size, nnz = read_matrix_params(f.as_posix()) if size[1] == hidden_size: x_files.append(f.as_posix()) if size[0] == hidden_size: y_files.append(f.as_posix()) index += 1 print() for fx, fy in zip(x_files, y_files): x = load_sparse_matrix(fx, device) y = gen_matrix(fy, device) if spmm_type == 'sparse@dense' else load_sparse_matrix(fy, device) yield (x, y)", - "docstring": "load_spmm_dataset loads a DLMC dataset for a sparse matrix-matrix multiplication (SPMM) performance test. Args: dataset_path: path of the dataset from DLMC collection. hidden_size: This value allows tensors of varying sizes. sparsity: This value allows tensors of varying sparsities. spmm_type: This value allows tensors for or operations. device: Whether to place the Tensor on a GPU or CPU. n_limit: This value allows a dataset with some limit size.", - "type": "function", - "file_path": "pytorch\\benchmarks\\sparse\\dlmc\\utils.py", - "ast_data": "FunctionDef name:load_spmm_dataset arguments arg:dataset_path arg:hidden_size arg:sparsity arg:spmm_type arg:device arg:n_limit Assign Assign Call call:Path Assign Call call:glob Assign Assign For If Compare op:GtE Assign Call call:read_matrix_params If Compare op:Eq If Compare op:Eq For Call call:zip Assign Call call:load_sparse_matrix Assign" - }, - { - "library": "pytorch", - "name": "get_default_static_quant_module_mappings", - "source_code": "def get_default_static_quant_module_mappings() -> dict[Callable, Any]: return copy.deepcopy(DEFAULT_STATIC_QUANT_MODULE_MAPPINGS)", - "docstring": "Get module mapping for post training static quantization", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\quantization_mappings.py", - "ast_data": "FunctionDef name:get_default_static_quant_module_mappings arguments Return return:yes" - }, - { - "library": "numpy", - "name": "shape_as", - "source_code": "def shape_as(self, obj): if self._zerod: return None return (obj * self._arr.ndim)(*self._arr.shape)", - "docstring": "Return the shape tuple as an array of some other c-types type. 
For example: ``.", - "type": "method", - "file_path": "numpy\\numpy\\_core\\_internal.py", - "ast_data": "FunctionDef name:shape_as arguments arg:self arg:obj If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "add_chunk", - "source_code": "def add_chunk(self, chunk: Union[message.Message, bytes], field_tags: util.FieldTypes, index = None) -> None: if self._parent_splitter is not None: self._parent_splitter.add_chunk(chunk, self._fields_in_parent + field_tags, index) else: assert self._chunks is not None assert self._chunked_message is not None field = self._chunked_message.chunked_fields.add(field_tag = util.get_field_tag(self._proto, field_tags)) new_chunk_index = len(self._chunks) field.message.chunk_index = new_chunk_index self._add_chunk_order.append(id(chunk)) if index is None: self._chunks.append(chunk) else: self._chunks.insert(index, chunk) self._fix_chunk_order = True", - "docstring": "Adds a new chunk and updates the ChunkedMessage proto. Args: chunk: Proto message or bytes. field_tags: Field information about the placement of the chunked data within self._proto. index: Optional index at which to insert the chunk. The chunk ordering is important for merging.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\tools\\proto_splitter\\split.py", - "ast_data": "FunctionDef name:add_chunk arguments arg:self arg:chunk type:Union[message.Message, bytes] arg:field_tags type:util.FieldTypes arg:index If Compare op:IsNot Assign Call call:add Assign Call call:len Assign If Compare op:Is Assign" - }, - { - "library": "tensorflow", - "name": "log_softmax", - "source_code": "@tf_export(v1 = ['nn.log_softmax', 'math.log_softmax']) @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support @deprecation.deprecated_args(None, 'dim is deprecated, use axis instead', 'dim') def log_softmax(logits, axis = None, name = None, dim = None): axis = deprecation.deprecated_argument_lookup('axis', axis, 'dim', dim) if axis is None: axis = -1 return _wrap_2d_function(logits, gen_nn_ops.log_softmax, axis, name)", - "docstring": "Computes log softmax activations. For each batch and class we have logsoftmax = logits - log(reduce_sum(exp(logits), axis)) Args: logits: A non-empty . Must be one of the following types: , , . axis: The dimension softmax would be performed on. The default is -1 which indicates the last dimension. name: A name for the operation (optional). dim: Deprecated alias for . Returns: A . Has the same type as . Same shape as . Raises: InvalidArgumentError: if is empty or is beyond the last dimension of .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", - "ast_data": "FunctionDef name:log_softmax arguments arg:logits arg:axis arg:name arg:dim Call call:tf_export Call call:deprecated_args Assign Call call:deprecated_argument_lookup If Compare op:Is Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "isvariadic", - "source_code": "def isvariadic(obj): return isinstance(obj, VariadicSignatureType)", - "docstring": "Check whether the type is variadic. 
Parameters ---------- obj : type The type to check Returns ------- bool Whether or not is variadic Examples -------- >>> # xdoctest: +SKIP >>> isvariadic(int) False >>> isvariadic(Variadic[int]) True", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\variadic.py", - "ast_data": "FunctionDef name:isvariadic arguments arg:obj Return return:yes" - }, - { - "library": "django", - "name": "r", - "source_code": "def r(self): value = self.data if not isinstance(value, datetime): default_timezone = get_default_timezone() value = datetime.combine(value, time.min).replace(tzinfo = default_timezone) elif is_naive(value): value = make_aware(value, timezone = self.timezone) return format_datetime_rfc5322(value)", - "docstring": "RFC 5322 formatted date; e.g. 'Thu, 21 Dec 2000 16:01:07 +0200'", - "type": "method", - "file_path": "django\\django\\utils\\dateformat.py", - "ast_data": "FunctionDef name:r arguments arg:self Assign If Assign Call call:get_default_timezone Assign Call call:replace If Call call:is_naive Assign Call call:make_aware Return return:yes" - }, - { - "library": "kornia", - "name": "__mul__", - "source_code": "def __mul__(self, right: So2 | Tensor) -> So2 | Tensor: z = self.z if isinstance(right, So2): return So2(z * right.z) elif isinstance(right, (Vector2, Tensor)): if isinstance(right, Tensor): check_so2_t_shape(right) x = right.data[..., 0] y = right.data[..., 1] real = z.real imag = z.imag out = stack((real * x - imag * y, imag * x + real * y), -1) if isinstance(right, Tensor): return out else: return Vector2(out) else: raise TypeError(f'Not So2 or Tensor type. Got: {type(right)}')", - "docstring": "Perform a left-multiplication either rotation concatenation or point-transform. Args: right: the other So2 transformation. Return: The resulting So2 transformation.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py", - "ast_data": "FunctionDef name:__mul__ arguments arg:self arg:right type:So2 | Tensor Assign If Call call:isinstance Return return:yes If Call call:isinstance If Call call:isinstance Assign Assign Assign Assign Assign Call call:stack If Call call:isinstance Return return:yes Return return:yes Raise raises:TypeError(f'Not So2 or Tensor type. Got: {type(right)}')" - }, - { - "library": "django", - "name": "iterative_dfs", - "source_code": "def iterative_dfs(self, start, forwards = True): visited = [] visited_set = set() stack = [(start, False)] while stack: node, processed = stack.pop() if node in visited_set: pass elif processed: visited_set.add(node) visited.append(node.key) else: stack.append((node, True)) stack + = [(n, False) for n in sorted(node.parents if forwards else node.children)] return visited", - "docstring": "Iterative depth-first search for finding dependencies.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\graph.py", - "ast_data": "FunctionDef name:iterative_dfs arguments arg:self arg:start arg:forwards Assign Assign Call call:set Assign While Assign Call call:pop If Compare op:In If Return return:yes" - }, - { - "library": "tensorflow", - "name": "read_file_to_string", - "source_code": "def read_file_to_string(filename, binary_mode = False): if binary_mode: f = FileIO(filename, mode = 'rb') else: f = FileIO(filename, mode = 'r') return f.read()", - "docstring": "Reads the entire contents of a file to a string. Args: filename: string, path to a file binary_mode: whether to open the file in binary mode or not. 
This changes the type of the object returned. Returns: contents of the file as a string or bytes. Raises: errors.OpError: Raises variety of errors that are subtypes e.g. etc.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:read_file_to_string arguments arg:filename arg:binary_mode If Assign Call call:FileIO Assign Call call:FileIO Return return:yes" - }, - { - "library": "pytorch", - "name": "generate_all_broadcasting_possibilities_no_padding", - "source_code": "def generate_all_broadcasting_possibilities_no_padding(d1: list[DVar], d2: list[DVar], d11: list[DVar], d12: list[DVar]): size = len(d1) res2 = [] for i in range(size): t1 = broadcast_dim(d1, d2, d11, d12, i) t2 = broadcast_dim(d2, d1, d12, d11, i) t3 = no_broadcast_dim_with_index(d1, d2, d11, d12, i) res2.append(Disj([t1, t2, t3])) return Conj(res2)", - "docstring": "Generate broadcasting constraints assuming no padding. Broadcasting can happen at any dimension. We look at all combinations for all dimensions in d1 and d2 Args: d1: input1 dimensions d2: input2 dimensions d11: broadcasted input1 dimensions d12: broadcasted input2 dimensions Returns: broadcasting constraints relating the input dimensions to the broadcasted dimensions", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:generate_all_broadcasting_possibilities_no_padding arguments arg:d1 type:list[DVar] arg:d2 type:list[DVar] arg:d11 type:list[DVar] arg:d12 type:list[DVar] Assign Call call:len Assign For Call call:range Assign Call call:broadcast_dim Assign Call call:broadcast_dim Assign Call call:no_broadcast_dim_with_index Return return:yes" - }, - { - "library": "matplotlib", - "name": "add_artist", - "source_code": "def add_artist(self, artist, clip = False): artist.set_figure(self) self.artists.append(artist) artist._remove_method = self.artists.remove if not artist.is_transform_set(): artist.set_transform(self.transSubfigure) if clip and artist.get_clip_path() is None: artist.set_clip_path(self.patch) self.stale = True return artist", - "docstring": "Add an to the figure. Usually artists are added to objects using ; this method can be used in the rare cases where one needs to add artists directly to the figure instead. Parameters ---------- artist : The artist to add to the figure. If the added artist has no transform previously set, its transform will be set to `~matplotlib.artist.Artist` The added artist.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:add_artist arguments arg:self arg:artist arg:clip Assign If If BoolOp Compare op:Is Assign Return return:yes" - }, - { - "library": "cherrypy", - "name": "next", - "source_code": "def next(self): data = self.rfile.next() self.bytes_read + = len(data) return data", - "docstring": "Return next portion of bytes from the iterated file.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\cpstats.py", - "ast_data": "FunctionDef name:next arguments arg:self Assign Call call:next Return return:yes" - }, - { - "library": "sphinx", - "name": "word_filter", - "source_code": "def word_filter(self, word: str) -> bool: return not word.isdigit() and word not in self.stopwords", - "docstring": "Return true if the target word should be registered in the search index. 
This method is called after stemming.", - "type": "method", - "file_path": "sphinx\\sphinx\\search\\__init__.py", - "ast_data": "FunctionDef name:word_filter arguments arg:self arg:word type:str Return return:yes" - }, - { - "library": "pytorch", - "name": "post", - "source_code": "@property def post(self) -> Optional[int]: return self._version.post[1] if self._version.post else None", - "docstring": "The post-release number of the version. >>> print(Version(\"1.2.3\").post) None >>> Version(\"1.2.3.post1\").post 1", - "type": "method", - "file_path": "pytorch\\torch\\_vendor\\packaging\\version.py", - "ast_data": "FunctionDef name:post arguments arg:self Return return:yes" - }, - { - "library": "authlib", - "name": "list_to_scope", - "source_code": "def list_to_scope(scope): if isinstance(scope, (set, tuple, list)): return ' '.join([to_unicode(s) for s in scope]) if scope is None: return scope return to_unicode(scope)", - "docstring": "Convert a list of scopes to a space separated string.", - "type": "function", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\util.py", - "ast_data": "FunctionDef name:list_to_scope arguments arg:scope If Call call:isinstance Return return:yes If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "image_to_tensor", - "source_code": "def image_to_tensor(image: Any, keepdim: bool = True) -> Tensor: if len(image.shape) > 4 or len(image.shape) < 2: raise ValueError('Input size must be a two, three or four dimensional array') input_shape = image.shape tensor: Tensor = torch.from_numpy(image) if len(input_shape) == 2: tensor = tensor.unsqueeze(0) elif len(input_shape) == 3: tensor = tensor.permute(2, 0, 1) elif len(input_shape) == 4: tensor = tensor.permute(0, 3, 1, 2) keepdim = True else: raise ValueError(f'Cannot process image with shape {input_shape}') return tensor.unsqueeze(0) if not keepdim else tensor", - "docstring": "Convert a numpy image to a PyTorch 4d tensor image. Args: image: image of the form :math:, :math: or :math:. keepdim: If False unsqueeze the input image to match the shape (B, H, W, C). Returns: tensor of the form (B, C, H, W) if keepdim is False, (C, H, W) otherwise. 
Example: >>> img = np.ones((3, 3)) >>> image_to_tensor(img).shape torch.Size([1, 3, 3]) >>> img = np.ones((4, 4, 1)) >>> image_to_tensor(img).shape torch.Size([1, 4, 4]) >>> img = np.ones((4, 4, 3)) >>> image_to_tensor(img, keepdim=False).shape torch.Size([1, 3, 4, 4])", - "type": "function", - "file_path": "kornia\\kornia\\utils\\image.py", - "ast_data": "FunctionDef name:image_to_tensor arguments arg:image type:Any arg:keepdim type:bool If BoolOp Compare op:Gt Compare op:Lt Raise raises:ValueError('Input size must be a two, three or four dimensional array') Assign If Compare op:Eq Assign Call call:unsqueeze If Compare op:Eq Assign Call call:permute If Compare op:Eq Assign Call call:permute Assign Raise raises:ValueError(f'Cannot process image with shape {input_shape}') Return return:yes" - }, - { - "library": "pytorch", - "name": "leaky_relu", - "source_code": "def leaky_relu(input: Tensor, negative_slope: float = 0.01, inplace: bool = False) -> Tensor: if has_torch_function_unary(input): return handle_torch_function(leaky_relu, (input,), input, negative_slope = negative_slope, inplace = inplace) if inplace: result = torch._C._nn.leaky_relu_(input, negative_slope) else: result = torch._C._nn.leaky_relu(input, negative_slope) return result", - "docstring": "leaky_relu(input, negative_slope=0.01, inplace=False) -> Tensor Applies element-wise, :math: See :class: for more details.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:leaky_relu arguments arg:input type:Tensor arg:negative_slope type:float arg:inplace type:bool If Call call:has_torch_function_unary Return return:yes If Assign Call call:leaky_relu_ Assign Call call:leaky_relu Return return:yes" - }, - { - "library": "pytorch", - "name": "set_stance", - "source_code": "class set_stance(_DecoratorContextManager): _dynamo_forbidden = True def __init__(self, stance: str = 'default', *, skip_guard_eval_unsafe: bool = False, force_backend = None) -> None: if force_backend is not None and stance ! = 'default': raise RuntimeError('non-default stance cannot have force_backend set') self.stance = DynamoStance(stance, skip_guard_eval_unsafe, force_backend) self.prev = _set_stance(self.stance) def __call__(self, fn): _set_stance(self.prev) wrapper = super().__call__(fn) wrapper._dynamo_forbidden = True return wrapper def __enter__(self): _set_stance(self.stance) def __exit__(self, exc_type, exc_val, exc_tb): _set_stance(self.prev) def clone(self): return self.__class__(self.stance.stance, force_backend = self.stance.backend)", - "docstring": "Decorator, context manager, function to set the current stance of the compiler. 
Stances documented in corresponding function in torch/compiler/__init__.py", - "type": "class", - "file_path": "pytorch\\torch\\_dynamo\\decorators.py", - "ast_data": "ClassDef name:set_stance Assign FunctionDef name:__init__ arguments arg:self arg:stance type:str If BoolOp Compare op:IsNot Compare op:NotEq Raise raises:RuntimeError('non-default stance cannot have force_backend set') Assign Call call:DynamoStance Assign Call call:_set_stance FunctionDef name:__call__ arguments arg:self arg:fn Assign Call call:__call__ Assign Return return:yes FunctionDef name:__enter__ arguments arg:self FunctionDef name:__exit__ arguments arg:self arg:exc_type arg:exc_val arg:exc_tb FunctionDef name:clone arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "FakeItemVariable", - "source_code": "class FakeItemVariable(TensorVariable): _nonvar_fields = {'need_unwrap', *TensorVariable._nonvar_fields} def __init__(self, proxy: torch.fx.Proxy, **kwargs) -> None: need_unwrap = kwargs.pop('need_unwrap', False) super().__init__(proxy, **kwargs) self.need_unwrap = need_unwrap @classmethod def from_tensor_variable(cls, tensor_variable): return FakeItemVariable(**dict(tensor_variable.__dict__))", - "docstring": "An unspecialized python variable which prevents access to the underlying raw value. This is needed if item is called on a FakeTensor.", - "type": "class", - "file_path": "pytorch\\torch\\_dynamo\\variables\\tensor.py", - "ast_data": "ClassDef name:FakeItemVariable Assign FunctionDef name:__init__ arguments arg:self arg:proxy type:torch.fx.Proxy kwarg:kwargs Assign Call call:pop Assign FunctionDef name:from_tensor_variable arguments arg:cls arg:tensor_variable Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_multialignment", - "source_code": "def set_multialignment(self, align): _api.check_in_list(['center', 'right', 'left'], align = align) self._multialignment = align self.stale = True", - "docstring": "Set the text alignment for multiline texts. The layout of the bounding box of all the lines is determined by the horizontalalignment and verticalalignment properties. This property controls the alignment of the text lines within that box. 
Parameters ---------- align : {'left', 'right', 'center'}", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:set_multialignment arguments arg:self arg:align Assign Assign" - }, - { - "library": "pytorch", - "name": "set_preserved_attributes", - "source_code": "def set_preserved_attributes(self, attributes: list[str]) -> PrepareCustomConfig: self.preserved_attributes = attributes return self", - "docstring": "Set the names of the attributes that will persist in the graph module even if they are not used in the model's `` method.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", - "ast_data": "FunctionDef name:set_preserved_attributes arguments arg:self arg:attributes type:list[str] Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "atleast_2d", - "source_code": "def atleast_2d(arg: Union[TensorLikeType, Sequence[TensorLikeType]], *args: TensorLikeType) -> Union[TensorLikeType, tuple[TensorLikeType, ...]]: if not args and isinstance(arg, collections.abc.Sequence): args_ = arg else: assert not isinstance(arg, collections.abc.Sequence) args_ = (arg,) + args unsqueeze_atleast_1d = partial(_unsqueeze_atleast, atleast_1d, 0) res = tuple((a if a.ndim > = 2 else unsqueeze_atleast_1d(a) for a in args_)) return res if len(res) > 1 else res[0]", - "docstring": "Reference implementation of :func:.", - "type": "function", - "file_path": "pytorch\\torch\\_refs\\__init__.py", - "ast_data": "FunctionDef name:atleast_2d arguments arg:arg type:Union[TensorLikeType, Sequence[TensorLikeType]] vararg:args If BoolOp Call call:isinstance Assign Assign Assign Call call:partial Assign Call call:tuple Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, input_dataset, batch_size, row_shape): if not isinstance(dataset_ops.get_legacy_output_types(input_dataset), dtypes.DType): raise TypeError(f'`dense_to_sparse_batch` requires an input dataset whose elements have a single component, but the given dataset has the following component types: {dataset_ops.get_legacy_output_types(input_dataset)}.') self._input_dataset = input_dataset self._batch_size = batch_size self._row_shape = row_shape self._element_spec = sparse_tensor.SparseTensorSpec(tensor_shape.TensorShape([None]).concatenate(self._row_shape), dataset_ops.get_legacy_output_types(input_dataset)) variant_tensor = ged_ops.dense_to_sparse_batch_dataset(self._input_dataset._variant_tensor, self._batch_size, row_shape = convert.partial_shape_to_tensor(self._row_shape), **self._flat_structure) super(_DenseToSparseBatchDataset, self).__init__(input_dataset, variant_tensor)", - "docstring": "See for more details.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\batching.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:input_dataset arg:batch_size arg:row_shape If Raise raises:TypeError(f'`dense_to_sparse_batch` requires an input dataset whose elements have a single component, but the given dataset has the following component types: {dataset_ops.get_legacy_output_types(input_dataset)}.') Assign Assign Assign Assign Call call:SparseTensorSpec Assign Call call:dense_to_sparse_batch_dataset" - }, - { - "library": "pytorch", - "name": "make_np", - "source_code": "def make_np(x: torch.Tensor) -> np.ndarray: if isinstance(x, np.ndarray): return x if np.isscalar(x): return np.array([x]) if isinstance(x, torch.Tensor): return 
_prepare_pytorch(x) raise NotImplementedError(f'Got {type(x)}, but numpy array or torch tensor are expected.')", - "docstring": "Convert an object into numpy array. Args: x: An instance of torch tensor Returns: numpy.array: Numpy array", - "type": "function", - "file_path": "pytorch\\torch\\utils\\tensorboard\\_convert_np.py", - "ast_data": "FunctionDef name:make_np arguments arg:x type:torch.Tensor If Call call:isinstance Return return:yes If Call call:isscalar Return return:yes If Call call:isinstance Return return:yes Raise raises:NotImplementedError(f'Got {type(x)}, but numpy array or torch tensor are expected.')" - }, - { - "library": "authlib", - "name": "get_nonce", - "source_code": "def get_nonce(self): raise NotImplementedError()", - "docstring": "Get \"nonce\" value of the authorization code object.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\core\\models.py", - "ast_data": "FunctionDef name:get_nonce arguments arg:self Raise raises:NotImplementedError()" - }, - { - "library": "matplotlib", - "name": "autumn", - "source_code": "def autumn() -> None: set_cmap('autumn')", - "docstring": "Set the colormap to 'autumn'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", - "ast_data": "FunctionDef name:autumn arguments" - }, - { - "library": "pytorch", - "name": "group_norm", - "source_code": "def group_norm(input: Tensor, num_groups: int, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, eps: float = 1e-05) -> Tensor: torch._check(input.ndim > = 2, lambda: f'Expected at least 2 dimensions for input tensor but received {input.ndim}') batch_size = input.shape[0] num_channels = input.shape[1] torch._check(num_channels % num_groups = = 0, lambda: 'Expected number of channels in input to be divisible by num_groups, ' + f'but got input of shape {input.shape} and num_groups = {num_groups}') flattened_inner_size = 1 for dim_length in input.shape[2:]: flattened_inner_size * = dim_length return torch.native_group_norm(input, weight, bias, batch_size, num_channels, flattened_inner_size, num_groups, eps)[0]", - "docstring": "Reference implementation of :func:.", - "type": "function", - "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", - "ast_data": "FunctionDef name:group_norm arguments arg:input type:Tensor arg:num_groups type:int arg:weight type:Optional[Tensor] arg:bias type:Optional[Tensor] arg:eps type:float Assign Assign Assign For Return return:yes" - }, - { - "library": "numpy", - "name": "check_restrict", - "source_code": "def check_restrict(self): return check_restrict(self)", - "docstring": "Return the restrict keyword recognized by the compiler, empty string otherwise.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\command\\config.py", - "ast_data": "FunctionDef name:check_restrict arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "variable_dtype", - "source_code": "@property def variable_dtype(self): return self._variable_dtype", - "docstring": "The variable dtype of this policy. This is the dtype layers will create their variables in, unless a layer explicitly chooses a different dtype. If this is different than , Layers will cast variables to the compute dtype to avoid type errors. Variable regularizers are run in the variable dtype, not the compute dtype. 
Returns: The variable dtype of this policy, as a string.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py", - "ast_data": "FunctionDef name:variable_dtype arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "initial_form_count", - "source_code": "def initial_form_count(self): if self.is_bound: return self.management_form.cleaned_data[INITIAL_FORM_COUNT] else: initial_forms = len(self.initial) if self.initial else 0 return initial_forms", - "docstring": "Return the number of forms that are required in this FormSet.", - "type": "method", - "file_path": "django\\django\\forms\\formsets.py", - "ast_data": "FunctionDef name:initial_form_count arguments arg:self If Return return:yes Assign Return return:yes" - }, - { - "library": "django", - "name": "disjoint", - "source_code": "def disjoint(self, other): return self._topology(capi.ogr_disjoint, other)", - "docstring": "Return True if this geometry and the other are spatially disjoint.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", - "ast_data": "FunctionDef name:disjoint arguments arg:self arg:other Return return:yes" - }, - { - "library": "numpy", - "name": "rec_drop_fields", - "source_code": "@array_function_dispatch(_rec_drop_fields_dispatcher) def rec_drop_fields(base, drop_names): return drop_fields(base, drop_names, usemask = False, asrecarray = True)", - "docstring": "Returns a new numpy.recarray with fields in dropped.", - "type": "function", - "file_path": "numpy\\numpy\\lib\\recfunctions.py", - "ast_data": "FunctionDef name:rec_drop_fields arguments arg:base arg:drop_names Call call:array_function_dispatch Return return:yes" - }, - { - "library": "pytorch", - "name": "ConcaterIterDataPipe", - "source_code": "@functional_datapipe('concat') class ConcaterIterDataPipe(IterDataPipe): datapipes: tuple[IterDataPipe] def __init__(self, *datapipes: IterDataPipe): if len(datapipes) = = 0: raise ValueError('Expected at least one DataPipe, but got nothing') if not all((isinstance(dp, IterDataPipe) for dp in datapipes)): raise TypeError('Expected all inputs to be `IterDataPipe`') self.datapipes = datapipes def __iter__(self) -> Iterator: for dp in self.datapipes: yield from dp def __len__(self) -> int: if all((isinstance(dp, Sized) for dp in self.datapipes)): return sum((len(dp) for dp in self.datapipes)) else: raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\")", - "docstring": "Concatenates multiple Iterable DataPipes (functional name: ``). The resulting DataPipe will yield all the elements from the first input DataPipe, before yielding from the subsequent ones. 
Args: datapipes: Iterable DataPipes being concatenated Example: >>> # xdoctest: +REQUIRES(module:torchdata) >>> import random >>> from torchdata.datapipes.iter import IterableWrapper >>> dp1 = IterableWrapper(range(3)) >>> dp2 = IterableWrapper(range(5)) >>> list(dp1.concat(dp2)) [0, 1, 2, 0, 1, 2, 3, 4]", - "type": "class", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\combining.py", - "ast_data": "ClassDef name:ConcaterIterDataPipe Call call:functional_datapipe FunctionDef name:__init__ arguments arg:self vararg:datapipes If Compare op:Eq Raise raises:ValueError('Expected at least one DataPipe, but got nothing') If Raise raises:TypeError('Expected all inputs to be `IterDataPipe`') Assign FunctionDef name:__iter__ arguments arg:self For FunctionDef name:__len__ arguments arg:self If Call call:all Return return:yes Raise raises:TypeError(f\"{type(self).__name__} instance doesn't have valid length\")" - }, - { - "library": "matplotlib", - "name": "minpos", - "source_code": "@property def minpos(self): return self._minpos", - "docstring": "The minimum positive value in both directions within the Bbox. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors, and will be used as the minimum extent instead of *p0*.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:minpos arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "BaseHeuristicSingleton", - "source_code": "class BaseHeuristicSingleton(type): _instances: dict[type[Any], Any] = {} _lock: Lock = Lock() def __call__(cls: BaseHeuristicSingleton, *args: Any, **kwargs: Any) -> BaseConfigHeuristic: with cls._lock: if cls not in cls._instances: instance = super().__call__() cls._instances[cls] = instance return cls._instances[cls]", - "docstring": "Thread-safe implementation of single to be used in the config heuristic subclasses to ensure heavy __init__ calls are not repeatedly run", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\template_heuristics.py", - "ast_data": "ClassDef name:BaseHeuristicSingleton FunctionDef name:__call__ arguments arg:cls type:BaseHeuristicSingleton vararg:args kwarg:kwargs With If Compare op:NotIn Assign Call call:__call__ Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "weights", - "source_code": "@property def weights(self): return self._weights", - "docstring": "Returns variables of this Optimizer based on the order created.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", - "ast_data": "FunctionDef name:weights arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_precision", - "source_code": "def get_precision(self): if self.store_precision: precision = self.precision_ else: precision = linalg.pinvh(self.covariance_, check_finite = False) return precision", - "docstring": "Getter for the precision matrix. 
Returns ------- precision_ : array-like of shape (n_features, n_features) The precision matrix associated to the current covariance object.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\covariance\\_empirical_covariance.py", - "ast_data": "FunctionDef name:get_precision arguments arg:self If Assign Assign Call call:pinvh Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, element_type, dimensions, layout = None): self.message = xla_data_pb2.ShapeProto() self.message.element_type = element_type if element_type = = xla_data_pb2.TUPLE: if not all((isinstance(subshape, Shape) for subshape in dimensions)): raise ValueError('XLA tuple requires sequence of Shape objects as dimensions') self._tuple_shapes = tuple(dimensions) for component_shape in self._tuple_shapes: component_message = self.message.tuple_shapes.add() component_message.CopyFrom(component_shape.message) else: self.message.dimensions.extend(dimensions) if layout is None: layout = list(reversed(range(len(dimensions)))) self.message.layout.minor_to_major.extend(layout)", - "docstring": "Creates a new XLA Shape. Args: element_type: element type from xla_data_pb2. dimensions: sequence of dimensions sizes (integers), or sequence of Shapes in the case of a tuple, i.e. when element_type is TUPLE. layout: optional minor_to_major sequence for layout. If not given, the default major-to-minor layout is used. Raises: ValueError: if element_type is TUPLE but dimensions are not Shape objects.", - "type": "method", - "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:element_type arg:dimensions arg:layout Assign Call call:ShapeProto Assign If Compare op:Eq If Raise raises:ValueError('XLA tuple requires sequence of Shape objects as dimensions') Assign Call call:tuple For Assign Call call:add If Compare op:Is Assign Call call:list" - }, - { - "library": "tensorflow", - "name": "convolution_kernel", - "source_code": "def convolution_kernel(self, name = 'convolution_kernel'): with self._name_scope(name): h = self._ifft(_to_complex(self.spectrum)) return math_ops.cast(h, self.dtype)", - "docstring": "Convolution kernel corresponding to . The dimensional DFT of this kernel is the frequency domain spectrum of this operator. Args: name: A name to give this . Returns: with .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_circulant.py", - "ast_data": "FunctionDef name:convolution_kernel arguments arg:self arg:name With Assign Call call:_ifft Return return:yes" - }, - { - "library": "matplotlib", - "name": "transmute", - "source_code": "def transmute(self, path, mutation_size, linewidth): raise NotImplementedError('Derived must override')", - "docstring": "The transmute method is the very core of the ArrowStyle class and must be overridden in the subclasses. It receives the *path* object along which the arrow will be drawn, and the *mutation_size*, with which the arrow head etc. will be scaled. The *linewidth* may be used to adjust the path so that it does not pass beyond the given points. It returns a tuple of a instance and a boolean. The boolean value indicate whether the path can be filled or not. 
The return value can also be a list of paths and list of booleans of the same length.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:transmute arguments arg:self arg:path arg:mutation_size arg:linewidth Raise raises:NotImplementedError('Derived must override')" - }, - { - "library": "pytorch", - "name": "cat_slice_cat", - "source_code": "@register_lowering_pattern(CallFunction(aten.cat, [_cat_1, CallFunction(aten.slice, _cat_1, 1, 0, KeywordArg('size'))], 1)) def cat_slice_cat(match, cat_input, size, dim = 1): first, *rest = cat_input if size > = 0 and V.graph.sizevars.statically_known_leq(size, first.get_size()[dim]): return L[aten.cat]([first, *rest, L[aten.slice](first, dim, 0, size)], dim) else: tmp = L[aten.cat](cat_input, dim) return L[aten.cat]([tmp, L[aten.slice](tmp, dim, 0, size)], dim)", - "docstring": "This is an example of a more complex pattern where cat_1 is used multiple times inside the pattern. We fold 2 calls to cat into one. Matches: cat_1: f32[1024, 4077] = torch.ops.aten.cat.default([add_26, primals_217], 1) slice_1: f32[1024, 4077] = torch.ops.aten.slice.Tensor(cat_1, 0, 0, 9223372036854775807) slice_2: f32[1024, 19] = torch.ops.aten.slice.Tensor(slice_1, 1, 0, 19) cat_2: f32[1024, 4096] = torch.ops.aten.cat.default([cat_1, slice_2], 1) Rewrite to: slice_2 = torch.ops.aten.slice.Tensor(add_26, 1, 0, 19) cat_2 = torch.ops.aten.cat.default([add_26, primals_217, slice2], 1)", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py", - "ast_data": "FunctionDef name:cat_slice_cat arguments arg:match arg:cat_input arg:size arg:dim Call call:register_lowering_pattern Assign If BoolOp Compare op:GtE Call call:statically_known_leq Return return:yes Assign Call Return return:yes" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "def decision_function(self, X): check_is_fitted(self) xp, _ = get_namespace(X) X = validate_data(self, X, accept_sparse = 'csr', reset = False) scores = safe_sparse_dot(X, self.coef_.T, dense_output = True) + self.intercept_ return xp.reshape(scores, (-1,)) if scores.ndim > 1 and scores.shape[1] = = 1 else scores", - "docstring": "Predict confidence scores for samples. The confidence score for a sample is proportional to the signed distance of that sample to the hyperplane. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The data matrix for which we want to get the confidence scores. Returns ------- scores : ndarray of shape (n_samples,) or (n_samples, n_classes) Confidence scores per combination. 
In the binary case, confidence score for where >0 means this class would be predicted.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\linear_model\\_base.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Assign Call call:get_namespace Assign Call call:validate_data Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "dfs_helper", - "source_code": "def dfs_helper(partition: Partition, latency_so_far_sec: float) -> float: latency_so_far_sec + = partition_to_latency_mapping[partition].overall_latency_sec if partition.children: max_latency_sec = 0.0 for child in partition.children: comm_latency_sec = get_comm_latency_between(partition, child, transfer_rate_bytes_per_sec) new_latency_sec = dfs_helper(child, latency_so_far_sec + comm_latency_sec) if new_latency_sec > max_latency_sec: max_latency_sec = new_latency_sec return max_latency_sec return latency_so_far_sec", - "docstring": "This function helps to recursively get the latency of a path of partitions", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py", - "ast_data": "FunctionDef name:dfs_helper arguments arg:partition type:Partition arg:latency_so_far_sec type:float If Assign For Assign Call call:get_comm_latency_between Assign Call call:dfs_helper If Compare op:Gt Assign Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "CCompiler_customize", - "source_code": "def CCompiler_customize(self, dist, need_cxx = 0): log.info('customize %s' % self.__class__.__name__) customize_compiler(self) if need_cxx: try: self.compiler_so.remove('-Wstrict-prototypes') except (AttributeError, ValueError): pass if hasattr(self, 'compiler') and 'cc' in self.compiler[0]: if not self.compiler_cxx: if self.compiler[0].startswith('gcc'): a, b = ('gcc', 'g++') else: a, b = ('cc', 'c++') self.compiler_cxx = [self.compiler[0].replace(a, b)] + self.compiler[1:] else: if hasattr(self, 'compiler'): log.warn('#### %s #######' % (self.compiler,)) if not hasattr(self, 'compiler_cxx'): log.warn('Missing compiler_cxx fix for ' + self.__class__.__name__) if hasattr(self, 'compiler') and ('gcc' in self.compiler[0] or 'g++' in self.compiler[0] or 'clang' in self.compiler[0]): self._auto_depends = True elif os.name = = 'posix': import tempfile import shutil tmpdir = tempfile.mkdtemp() try: fn = os.path.join(tmpdir, 'file.c') with open(fn, 'w') as f: f.write('int a;\\n') self.compile([fn], output_dir = tmpdir, extra_preargs = ['-MMD', '-MF', fn + '.d']) self._auto_depends = True except CompileError: self._auto_depends = False finally: shutil.rmtree(tmpdir) return", - "docstring": "Do any platform-specific customization of a compiler instance. This method calls `` option is removed to prevent spurious warnings. Default is False. 
Returns ------- None Notes ----- All the default options used by distutils can be extracted with:: from distutils import sysconfig sysconfig.get_config_vars('CC', 'CXX', 'OPT', 'BASECFLAGS', 'CCSHARED', 'LDSHARED', 'SO')", - "type": "function", - "file_path": "numpy\\numpy\\distutils\\ccompiler.py", - "ast_data": "FunctionDef name:CCompiler_customize arguments arg:self arg:dist arg:need_cxx If Try ExceptHandler If BoolOp Call call:hasattr Compare op:In If If Call call:startswith Assign Assign Assign If Call call:hasattr If If BoolOp Call call:hasattr BoolOp Compare op:In Compare op:In Compare op:In Assign If Compare op:Eq Assign Call call:mkdtemp Try Assign Call call:join With Assign ExceptHandler Assign Return return:no" - }, - { - "library": "django", - "name": "get_safe_request_meta", - "source_code": "def get_safe_request_meta(self, request): if not hasattr(request, 'META'): return {} return {k: self.cleanse_setting(k, v) for k, v in request.META.items()}", - "docstring": "Return a dictionary of request.META with sensitive values redacted.", - "type": "method", - "file_path": "django\\django\\views\\debug.py", - "ast_data": "FunctionDef name:get_safe_request_meta arguments arg:self arg:request If Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "read_var_header", - "source_code": "def read_var_header(self): hdr = self._matrix_reader.read_header() remaining_bytes = reduce(mul, hdr.dims, np.int64(hdr.dtype.itemsize)) if hdr.is_complex and (not hdr.mclass = = mxSPARSE_CLASS): remaining_bytes * = 2 next_position = self.mat_stream.tell() + remaining_bytes return (hdr, next_position)", - "docstring": "Read and return header, next position Parameters ---------- None Returns ------- header : object object that can be passed to self.read_var_array, and that has attributes `` next_position : int position in stream of next variable", - "type": "method", - "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", - "ast_data": "FunctionDef name:read_var_header arguments arg:self Assign Call call:read_header Assign Call call:reduce If BoolOp Assign Return return:yes" - }, - { - "library": "cherrypy", - "name": "index", - "source_code": "@cherrypy.expose def index(self): return '\\n Remi Delon
\\n Hendrik Mans
\\n Lorenzo Lamas
\\n '", - "docstring": "Produce HTTP response body of the users app index URI.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\tutorial\\tut06_default_method.py", - "ast_data": "FunctionDef name:index arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "def decision_function(self, X): X = validate_data(self, X, dtype = DTYPE, order = 'C', accept_sparse = 'csr', reset = False) raw_predictions = self._raw_predict(X) if raw_predictions.shape[1] = = 1: return raw_predictions.ravel() return raw_predictions", - "docstring": "Compute the decision function of `classes_`. Regression and binary classification produce an array of shape (n_samples,).", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Assign Call call:validate_data Assign Call call:_raw_predict If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "ShowDirs", - "source_code": "@cli.cls_cmd('show_PYTHONPATH') class ShowDirs(Python): ctx = CONTEXT pythonpath = Python.pythonpath extra_argv = Python.extra_argv @classmethod def run(cls, pythonpath, extra_argv, **kwargs): cls._setup(pythonpath, **kwargs) py_path = os.environ.get('PYTHONPATH', '') click.echo(f'PYTHONPATH = {py_path}')", - "docstring": ":information: Show value of the PYTHONPATH environment variable used in this script. PYTHONPATH sets the default search path for module files for the interpreter. Here, it includes the path to the local SciPy build (typically ). Use the global option to skip the building step, e.g.:", - "type": "class", - "file_path": "scipy\\dev.py", - "ast_data": "ClassDef name:ShowDirs Call call:cls_cmd Assign Assign Assign FunctionDef name:run arguments arg:cls arg:pythonpath arg:extra_argv kwarg:kwargs Assign Call call:get" - }, - { - "library": "cherrypy", - "name": "now", - "source_code": "def now(self): return datetime.datetime.now()", - "docstring": "Generate the session specific concept of 'now'. Other session providers can override this to use alternative, possibly timezone aware, versions of 'now'.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\sessions.py", - "ast_data": "FunctionDef name:now arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "skip_backend", - "source_code": "def skip_backend(backend): tid = threading.get_native_id() try: return backend.__ua_cache__[tid, 'skip'] except AttributeError: backend.__ua_cache__ = {} except KeyError: pass ctx = _SkipBackendContext(backend) backend.__ua_cache__[tid, 'skip'] = ctx return ctx", - "docstring": "A context manager that allows one to skip a given backend from processing entirely. This allows one to use another backend's code in a library that is also a consumer of the same backend. Parameters ---------- backend The backend to skip. See Also -------- set_backend: A context manager that allows setting of backends. 
set_global_backend: Set a single, global backend for a domain.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py", - "ast_data": "FunctionDef name:skip_backend arguments arg:backend Assign Call call:get_native_id Try Return return:yes ExceptHandler Assign ExceptHandler Assign Call call:_SkipBackendContext Assign Return return:yes" - }, - { - "library": "scipy", - "name": "rvs", - "source_code": "def rvs(self, m, n, size = None, random_state = None): M, m, n, _, _, _ = self._process_parameters(m, n) random_state = self._get_random_state(random_state) if size is not None and isinstance(size, int): size = (size,) if size is None: rvs = np.empty(m.shape, dtype = m.dtype) else: rvs = np.empty(size + (m.shape[-1],), dtype = m.dtype) rem = M for c in range(m.shape[-1] - 1): rem = rem - m[..., c] n0mask = n = = 0 rvs[..., c] = ~n0mask * random_state.hypergeometric(m[..., c], rem + n0mask, n + n0mask, size = size) n = n - rvs[..., c] rvs[..., m.shape[-1] - 1] = n return rvs", - "docstring": "Draw random samples from a multivariate hypergeometric distribution. Parameters ---------- %(_doc_default_callparams)s size : integer or iterable of integers, optional Number of samples to draw. Default is `multivariate_hypergeometric` sampler is not used as it doesn't support broadcasting.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:rvs arguments arg:self arg:m arg:n arg:size arg:random_state Assign Call call:_process_parameters Assign Call call:_get_random_state If BoolOp Compare op:IsNot Call call:isinstance Assign If Compare op:Is Assign Call call:empty Assign Call call:empty Assign For Call call:range Assign Assign Compare op:Eq Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "Server", - "source_code": "@tf_export('distribute.Server', v1 = ['distribute.Server', 'train.Server']) @deprecation.deprecated_endpoints('train.Server') class Server: def __init__(self, server_or_cluster_def, job_name = None, task_index = None, protocol = None, config = None, start = True): self._server_def = _make_server_def(server_or_cluster_def, job_name, task_index, protocol, config) self._server = c_api.TF_NewServer(self._server_def.SerializeToString()) if start: self.start() def __del__(self): if errors is not None: exception = errors.UnimplementedError else: exception = Exception try: c_api.TF_ServerStop(self._server) except AttributeError: pass except exception: pass self._server = None def start(self): c_api.TF_ServerStart(self._server) def join(self): c_api.TF_ServerJoin(self._server) @property def server_def(self): return self._server_def @property def target(self): return c_api.TF_ServerTarget(self._server) @staticmethod def create_local_server(config = None, start = True): return Server({'localhost': ['localhost: 0']}, protocol = 'grpc', config = config, start = start)", - "docstring": "An in-process TensorFlow server, for use in distributed training. A instance encapsulates a set of devices and a target that can participate in distributed training. A server belongs to a cluster (specified by a ), and corresponds to a particular task in a named job. 
The server can communicate with any other server in the same cluster.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py", - "ast_data": "ClassDef name:Server Call call:tf_export Call call:deprecated_endpoints FunctionDef name:__init__ arguments arg:self arg:server_or_cluster_def arg:job_name arg:task_index arg:protocol arg:config arg:start Assign Call call:_make_server_def Assign Call call:TF_NewServer If FunctionDef name:__del__ arguments arg:self If Compare op:IsNot Assign Assign Try ExceptHandler ExceptHandler Assign FunctionDef name:start arguments arg:self FunctionDef name:join arguments arg:self FunctionDef name:server_def arguments arg:self Return return:yes FunctionDef name:target arguments arg:self Return return:yes FunctionDef name:create_local_server arguments arg:config arg:start Return return:yes" - }, - { - "library": "pandas", - "name": "dataclasses_to_dicts", - "source_code": "def dataclasses_to_dicts(data): from dataclasses import asdict return list(map(asdict, data))", - "docstring": "Converts a list of dataclass instances to a list of dictionaries. Parameters ---------- data : List[Type[dataclass]] Returns -------- list_dict : List[dict] Examples -------- >>> from dataclasses import dataclass >>> @dataclass ... class Point: ... x: int ... y: int >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]", - "type": "function", - "file_path": "pandas\\pandas\\core\\internals\\construction.py", - "ast_data": "FunctionDef name:dataclasses_to_dicts arguments arg:data Return return:yes" - }, - { - "library": "mongo", - "name": "find_raw_batches", - "source_code": "def find_raw_batches(self, *args: Any, **kwargs: Any) -> RawBatchCursor[_DocumentType]: if self._database.client._encrypter: raise InvalidOperation('find_raw_batches does not support auto encryption') return RawBatchCursor(self, *args, **kwargs)", - "docstring": "Query the database and retrieve batches of raw BSON. Similar to the :meth: method but returns a :class:. This example demonstrates how to work with raw batches, but in practice raw batches should be passed to an external library that can decode BSON into another data type, rather than used with PyMongo's :mod: module. >>> import bson >>> cursor = db.test.find_raw_batches() >>> for batch in cursor: ... print(bson.decode_all(batch)) .. note:: find_raw_batches does not support auto encryption. .. versionchanged:: 3.12 Instead of ignoring the user-specified read concern, this method now sends it to the server when connected to MongoDB 3.6+. Added session support. .. versionadded:: 3.6", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\collection.py", - "ast_data": "FunctionDef name:find_raw_batches arguments arg:self vararg:args kwarg:kwargs If Raise raises:InvalidOperation('find_raw_batches does not support auto encryption') Return return:yes" - }, - { - "library": "scipy", - "name": "ElAttarVidyasagarDutta", - "source_code": "class ElAttarVidyasagarDutta(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.custom_bounds = [(-4, 4), (-4, 4)] self.global_optimum = [[3.40918683, -2.17143304]] self.fglob = 1.712780354 def fun(self, x, *args): self.nfev + = 1 return (x[0] ** 2 + x[1] - 10) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2 + (x[0] ** 2 + x[1] ** 3 - 1) ** 2", - "docstring": "El-Attar-Vidyasagar-Dutta [1]_ objective function. 
This class defines the El-Attar-Vidyasagar-Dutta function global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{ElAttarVidyasagarDutta}}(x) = (x_1^2 + x_2 - 10)^2 + (x_1 + x_2^2 - 7)^2 + (x_1^2 + x_2^3 - 1)^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py", - "ast_data": "ClassDef name:ElAttarVidyasagarDutta FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "pytorch", - "name": "add_scheduler_init_hook", - "source_code": "def add_scheduler_init_hook(pre_fn: Callable[..., Any], post_fn: Optional[Callable[..., Any]] = None) -> Any: from torch._inductor.scheduler import Scheduler orig_fn = Scheduler.__init__ def wrapper(scheduler: Any, nodes: Any) -> Any: pre_fn(scheduler, nodes) out = orig_fn(scheduler, nodes) if post_fn: post_fn(scheduler, nodes) return out return unittest.mock.patch.object(Scheduler, '__init__', wrapper)", - "docstring": "Add hook functions to be called at the beginning and end of Scheduler.__init__. Used for unit tests.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\utils.py", - "ast_data": "FunctionDef name:add_scheduler_init_hook arguments arg:pre_fn type:Callable[..., Any] arg:post_fn type:Optional[Callable[..., Any]] Assign FunctionDef name:wrapper arguments arg:scheduler type:Any arg:nodes type:Any Assign Call call:orig_fn If Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "set_ordered", - "source_code": "def set_ordered(self, value: bool) -> Self: new_dtype = CategoricalDtype(self.categories, ordered = value) cat = self.copy() NDArrayBacked.__init__(cat, cat._ndarray, new_dtype) return cat", - "docstring": "Set the ordered attribute to the boolean value. Parameters ---------- value : bool Set whether this categorical is ordered (True) or not (False).", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", - "ast_data": "FunctionDef name:set_ordered arguments arg:self arg:value type:bool Assign Call call:CategoricalDtype Assign Call call:copy Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, graph_a: _C.Graph, graph_b: _C.Graph): self.graph_a = graph_a self.graph_b = graph_b", - "docstring": "Construct a _GraphDiff object. Args: graph_a (_C.Graph): First graph to compare. graph_b (_C.Graph): Second graph to compare.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\verification.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:graph_a type:_C.Graph arg:graph_b type:_C.Graph Assign Assign" - }, - { - "library": "numpy", - "name": "identity", - "source_code": "@finalize_array_function_like @set_module('numpy') def identity(n, dtype = None, *, like = None): if like is not None: return _identity_with_like(like, n, dtype = dtype) from numpy import eye return eye(n, dtype = dtype, like = like)", - "docstring": "Return the identity array. The identity array is a square array with ones on the main diagonal. Parameters ---------- n : int Number of rows (and columns) in x output. dtype : data-type, optional Data-type of the output. Defaults to `nn` array with its main diagonal set to one, and all other elements 0. 
Examples -------- >>> import numpy as np >>> np.identity(3) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])", - "type": "function", - "file_path": "numpy\\numpy\\_core\\numeric.py", - "ast_data": "FunctionDef name:identity arguments arg:n arg:dtype Call call:set_module If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "value_counts", - "source_code": "def value_counts(self, dropna: bool = True) -> Series: result = value_counts(np.asarray(self), dropna = dropna) result.index = result.index.astype(self.dtype) return result", - "docstring": "Returns a Series containing counts of each interval. Parameters ---------- dropna : bool, default True Don't include counts of NaN. Returns ------- counts : Series See Also -------- Series.value_counts", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\interval.py", - "ast_data": "FunctionDef name:value_counts arguments arg:self arg:dropna type:bool Assign Call call:value_counts Assign Call call:astype Return return:yes" - }, - { - "library": "pytorch", - "name": "same_two_models", - "source_code": "def same_two_models(gm, opt_gm, example_inputs, only_fwd = False, *, require_fp64 = False, ignore_non_fp = False): from .utils import same ref = run_fwd_maybe_bwd(gm, example_inputs, only_fwd) fp64_ref = None if config.same_two_models_use_fp64: try: fp64_model, fp64_examples = cast_to_fp64(copy.deepcopy(gm), clone_inputs_retaining_gradness(example_inputs)) fp64_ref = run_fwd_maybe_bwd(fp64_model, fp64_examples, only_fwd) except Exception: if require_fp64: raise RuntimeError('Could not generate fp64 outputs, workaround with torch._dynamo.config.same_two_models_use_fp64 = False') log.warning('Could not generate fp64 outputs') try: res = run_fwd_maybe_bwd(opt_gm, example_inputs, only_fwd) except Exception: log.exception('While minifying the program in accuracy minification mode, ran into a runtime exception which is likely an unrelated issue. Skipping this graph.') return True passing = same(ref, res, fp64_ref, tol = config.repro_tolerance, equal_nan = True, ignore_non_fp = ignore_non_fp) return passing", - "docstring": "Check two models have same accuracy. require_fp64: if True, raise an error if we unable to calculate the fp64 reference ignore_non_fp: if True, do not compare outputs which are not floating point. This is mostly useful for the minifier (which wants to avoid quantizing floating point error into integer/boolean error)", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\debug_utils.py", - "ast_data": "FunctionDef name:same_two_models arguments arg:gm arg:opt_gm arg:example_inputs arg:only_fwd Assign Call call:run_fwd_maybe_bwd Assign If Try Assign Call call:cast_to_fp64 Assign Call call:run_fwd_maybe_bwd ExceptHandler If Raise raises:RuntimeError('Could not generate fp64 outputs, workaround with torch._dynamo.config.same_two_models_use_fp64 = False') Try Assign Call call:run_fwd_maybe_bwd ExceptHandler Return return:yes Assign Call call:same Return return:yes" - }, - { - "library": "pytorch", - "name": "model_is_exported", - "source_code": "def model_is_exported(m: torch.nn.Module) -> bool: return isinstance(m, torch.fx.GraphModule) and any(('val' in n.meta for n in m.graph.nodes))", - "docstring": "Return True if the was exported, False otherwise (e.g. 
if the model was FX symbolically traced or not traced at all).", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\export_utils.py", - "ast_data": "FunctionDef name:model_is_exported arguments arg:m type:torch.nn.Module Return return:yes" - }, - { - "library": "authlib", - "name": "get_server_metadata", - "source_code": "def get_server_metadata(self): raise NotImplementedError()", - "docstring": "Return server metadata which includes supported grant types, response types and etc.", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc7591\\endpoint.py", - "ast_data": "FunctionDef name:get_server_metadata arguments arg:self Raise raises:NotImplementedError()" - }, - { - "library": "scipy", - "name": "parzen", - "source_code": "def parzen(M, sym = True, *, xp = None, device = None): xp = _namespace(xp) if _len_guards(M): return xp.ones(M, dtype = xp.float64, device = device) M, needs_trunc = _extend(M, sym) n = xp.arange(-(M - 1) / 2.0, (M - 1) / 2.0 + 0.5, 1.0, dtype = xp.float64, device = device) w = xp.where(abs(n) < = (M - 1) / 4.0, 1 - 6 * (abs(n) / (M / 2.0)) ** 2.0 + 6 * (abs(n) / (M / 2.0)) ** 3.0, 2 * (1 - abs(n) / (M / 2.0)) ** 3.0) return _truncate(w, needs_trunc)", - "docstring": "Return a Parzen window. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional When True (default), generates a symmetric window, for use in filter design. When False, generates a periodic window, for use in spectral analysis. %(xp_device_snippet)s Returns ------- w : ndarray The window, with the maximum value normalized to 1 (though the value 1 does not appear if is even and is True). References ---------- .. [1] E. Parzen, \"Mathematical Considerations in the Estimation of Spectra\", Technometrics, Vol. 3, No. 2 (May, 1961), pp. 167-190 Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.parzen(51) >>> plt.plot(window) >>> plt.title(\"Parzen window\") >>> plt.ylabel(\"Amplitude\") >>> plt.xlabel(\"Sample\") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title(\"Frequency response of the Parzen window\") >>> plt.ylabel(\"Normalized magnitude [dB]\") >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")", - "type": "function", - "file_path": "scipy\\scipy\\signal\\windows\\_windows.py", - "ast_data": "FunctionDef name:parzen arguments arg:M arg:sym Assign Call call:_namespace If Call call:_len_guards Return return:yes Assign Call call:_extend Assign Call call:arange Assign Call call:where Return return:yes" - }, - { - "library": "pytorch", - "name": "named_parameters", - "source_code": "def named_parameters(self, prefix: str = '', recurse: bool = True, remove_duplicate: bool = True) -> Iterator[tuple[str, Parameter]]: gen = self._named_members(lambda module: module._parameters.items(), prefix = prefix, recurse = recurse, remove_duplicate = remove_duplicate) yield from gen", - "docstring": "Return an iterator over module parameters, yielding both the name of the parameter as well as the parameter itself. Args: prefix (str): prefix to prepend to all parameter names. 
recurse (bool): if True, then yields parameters of this module and all submodules. Otherwise, yields only parameters that are direct members of this module. remove_duplicate (bool, optional): whether to remove the duplicated parameters in the result. Defaults to True. Yields: (str, Parameter): Tuple containing the name and parameter Example:: >>> # xdoctest: +SKIP(\"undefined vars\") >>> for name, param in self.named_parameters(): >>> if name in ['bias']: >>> print(param.size())", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:named_parameters arguments arg:self arg:prefix type:str arg:recurse type:bool arg:remove_duplicate type:bool Assign Call call:_named_members" - }, - { - "library": "algorithms", - "name": "power", - "source_code": "def power(a: int, n: int, mod: int = None): ans = 1 while n: if n & 1: ans = ans * a a = a * a if mod: ans % = mod a % = mod n >> = 1 return ans", - "docstring": "Iterative version of binary exponentiation Calculate a ^ n if mod is specified, return the result modulo mod Time Complexity : O(log(n)) Space Complexity : O(1)", - "type": "function", - "file_path": "algorithms\\algorithms\\maths\\power.py", - "ast_data": "FunctionDef name:power arguments arg:a type:int arg:n type:int arg:mod type:int Assign While If Assign Assign If Return return:yes" - }, - { - "library": "tensorflow", - "name": "keyword_args_only", - "source_code": "def keyword_args_only(func): decorator_utils.validate_callable(func, 'keyword_args_only') @functools.wraps(func) def new_func(*args, **kwargs): if args: raise ValueError(f'The function {func.__name__} only accepts keyword arguments. Do not pass positional arguments. Received the following positional arguments: {args}') return func(**kwargs) return new_func", - "docstring": "Decorator for marking specific function accepting keyword args only. This decorator raises a if the input is called with any non-keyword args. This prevents the caller from providing the arguments in wrong order. Args: func: The function or method needed to be decorated. Returns: Decorated function or method. Raises: ValueError: If is not callable.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\keyword_args.py", - "ast_data": "FunctionDef name:keyword_args_only arguments arg:func FunctionDef name:new_func arguments vararg:args kwarg:kwargs Call call:wraps If Raise raises:ValueError(f'The function {func.__name__} only accepts keyword arguments. Do not pass positional arguments. Received the following positional arguments: {args}') Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "CornerGFTT", - "source_code": "class CornerGFTT(Module): def __init__(self, grads_mode: str = 'sobel') -> None: super().__init__() self.grads_mode: str = grads_mode def __repr__(self) -> str: return f'{self.__class__.__name__}(grads_mode = {self.grads_mode})' def forward(self, input: Tensor, sigmas: Optional[Tensor] = None) -> Tensor: return gftt_response(input, self.grads_mode, sigmas)", - "docstring": "Module that calculates Shi-Tomasi corners. .. 
image:: _static/img/gftt_response.png See :func: for details.", - "type": "class", - "file_path": "kornia\\kornia\\feature\\responses.py", - "ast_data": "ClassDef name:CornerGFTT FunctionDef name:__init__ arguments arg:self arg:grads_mode type:str FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:input type:Tensor arg:sigmas type:Optional[Tensor] Return return:yes" - }, - { - "library": "pytorch", - "name": "get_observer_state_dict", - "source_code": "def get_observer_state_dict(mod): od = OrderedDict() if isinstance(mod, torch.jit.RecursiveScriptModule): for k, v in mod.state_dict().items(): if 'observer' in k: od[k] = v else: for k, v in mod.state_dict().items(): if 'activation_post_process' in k: od[k] = v od._metadata = mod.state_dict()._metadata return od", - "docstring": "Returns the state dict corresponding to the observer stats. Traverse the model state_dict and extract out the stats.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", - "ast_data": "FunctionDef name:get_observer_state_dict arguments arg:mod Assign Call call:OrderedDict If Call call:isinstance For Call call:items If Compare op:In Assign For Call call:items If Compare op:In Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "load_ratio_right", - "source_code": "def load_ratio_right(M: int, N: int, O: int, P: int, m: int, n: int, o: int, p: int) -> float: base = N * O + O * P + M * N + N * P gemm = ceildiv(M, m) * ceildiv(P, p) * ceildiv(N, n) * (m * n + ceildiv(O, o) * (n * o + o * p)) return base / gemm", - "docstring": "compute the ratio of estimated numbers of loads in baseline and b2bgemm M, N, O, P are matrix sizes m, n, o, p are block sizes | | baseline (lower bound) | b2bgemm | load | N * O + O * P + M * N + N * P | M / m * P / p * N / n * (m * n + O / o * (n * o + o * p)) | store | N * P + M * P | M * P b2bgemm is always better on stores, but for loads we need to find out beneficial cases using this function", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py", - "ast_data": "FunctionDef name:load_ratio_right arguments arg:M type:int arg:N type:int arg:O type:int arg:P type:int arg:m type:int arg:n type:int arg:o type:int arg:p type:int Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_tf_type_name", - "source_code": "def get_tf_type_name(tf_type): return 'tf.' + tf_type.name if tf_type else None", - "docstring": "Converts tf.dtype (eg: tf.float32) to str (eg: \"tf.float32\").", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", - "ast_data": "FunctionDef name:get_tf_type_name arguments arg:tf_type Return return:yes" - }, - { - "library": "pytorch", - "name": "constant_to_device", - "source_code": "def constant_to_device(self, device: torch.device) -> IRNode: return self.data.constant_to_device(device)", - "docstring": "Move this to a given device. 
Requires that all reads are to constants.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "FunctionDef name:constant_to_device arguments arg:self arg:device type:torch.device Return return:yes" - }, - { - "library": "pytorch", - "name": "make_call_generated_code", - "source_code": "def make_call_generated_code(self, fn_name: str) -> None: self.extend_output(self.load_function_name(fn_name, True)) graphargs = self.tx.output.graphargs seen_sources: OrderedSet[Source] = OrderedSet() def collect_temp_source(source): if source in seen_sources: self.mark_source_temp(source) return seen_sources.add(source) if isinstance(source, ChainedSource): collect_temp_source(source.base) if isinstance(source, DictGetItemSource) and isinstance(source.index, Source): collect_temp_source(source.index) for arg in graphargs: if arg.source is not None: collect_temp_source(arg.source) for arg in graphargs: if arg.pass_arg_as_tensor: self.add_push_null(lambda: self.extend_output([self.create_load_python_module(torch), self.create_load_attr('_as_tensor_fullprec')])) self.call_reconstruct(arg) self.extend_output(create_call_function(1, False)) else: self.call_reconstruct(arg) self.extend_output(create_call_function(len(graphargs), False))", - "docstring": "Call the generated code function stored in fn_name", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\codegen.py", - "ast_data": "FunctionDef name:make_call_generated_code arguments arg:self arg:fn_name type:str Assign FunctionDef name:collect_temp_source arguments arg:source If Compare op:In Return return:no If Call call:isinstance If BoolOp Call call:isinstance Call call:isinstance For If Compare op:IsNot For If" - }, - { - "library": "django", - "name": "d", - "source_code": "def d(self): return '%02d' % self.data.day", - "docstring": "Day of the month, 2 digits with leading zeros; i.e. '01' to '31'", - "type": "method", - "file_path": "django\\django\\utils\\dateformat.py", - "ast_data": "FunctionDef name:d arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "parse_et_yaml", - "source_code": "def parse_et_yaml(path: str, tags_yaml_path: str, ignore_keys: set[DispatchKey] | None = None, skip_native_fns_gen: bool = False) -> tuple[list[NativeFunction], dict[OperatorName, dict[str, Any]]]: with open(path) as f: es = yaml.load(f, Loader = LineLoader) et_kernel = extract_kernel_fields(es) strip_et_fields(es) native_yaml = parse_native_yaml(path, tags_yaml_path, ignore_keys, skip_native_fns_gen = skip_native_fns_gen, loaded_yaml = es) return (native_yaml.native_functions, et_kernel)", - "docstring": "Parse native_functions.yaml into NativeFunctions and an Operator Indexed Dict of fields to persist from native_functions.yaml to functions.yaml", - "type": "function", - "file_path": "pytorch\\torchgen\\executorch\\parse.py", - "ast_data": "FunctionDef name:parse_et_yaml arguments arg:path type:str arg:tags_yaml_path type:str arg:ignore_keys type:set[DispatchKey] | None arg:skip_native_fns_gen type:bool With Assign Call call:load Assign Call call:extract_kernel_fields Assign Call call:parse_native_yaml Return return:yes" - }, - { - "library": "coconut", - "name": "get_highest_parse_loc", - "source_code": "def get_highest_parse_loc(original): highest_loc = 0 for lookup, _ in get_cache_items_for(original): loc = lookup[_lookup_loc] if loc > highest_loc: highest_loc = loc return highest_loc", - "docstring": "Get the highest observed parse location. 
Note that there's no point in filtering for successes/failures, since we always see both at the same locations.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:get_highest_parse_loc arguments arg:original Assign For Call call:get_cache_items_for Assign If Compare op:Gt Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_device", - "source_code": "def get_device(node) -> Optional[torch.device]: if 'val' not in node.meta: return None candidates = node.meta['val'] if not isinstance(candidates, tuple): candidates = (candidates,) for candidate in candidates: if isinstance(candidate, torch.Tensor): if candidate.device.type = = 'cuda': return candidate.device return torch.device('cpu')", - "docstring": "Check the example value of the node outputs to find the device type.", - "type": "function", - "file_path": "pytorch\\torch\\_functorch\\partitioners.py", - "ast_data": "FunctionDef name:get_device arguments arg:node If Compare op:NotIn Return return:yes Assign If Assign For If Call call:isinstance If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "flexx", - "name": "user_color", - "source_code": "@event.emitter def user_color(self, color): d = {'old_value': self.color, 'new_value': color} self.set_color(color) return d", - "docstring": "Event emitted when the user changes the color. Has `` attributes.", - "type": "method", - "file_path": "flexx\\flexx\\ui\\widgets\\_color.py", - "ast_data": "FunctionDef name:user_color arguments arg:self arg:color Assign Return return:yes" - }, - { - "library": "django", - "name": "RequestContext", - "source_code": "class RequestContext(Context): def __init__(self, request, dict_ = None, processors = None, use_l10n = None, use_tz = None, autoescape = True): super().__init__(dict_, use_l10n = use_l10n, use_tz = use_tz, autoescape = autoescape) self.request = request self._processors = () if processors is None else tuple(processors) self._processors_index = len(self.dicts) self.update({}) self.update({}) @contextmanager def bind_template(self, template): if self.template is not None: raise RuntimeError('Context is already bound to a template') self.template = template processors = template.engine.template_context_processors + self._processors updates = {} for processor in processors: context = processor(self.request) try: updates.update(context) except TypeError as e: raise TypeError(f\"Context processor {processor.__qualname__} didn't return a dictionary.\") from e self.dicts[self._processors_index] = updates try: yield finally: self.template = None self.dicts[self._processors_index] = {} def new(self, values = None): new_context = super().new(values) if hasattr(new_context, '_processors_index'): del new_context._processors_index return new_context", - "docstring": "This subclass of template.Context automatically populates itself using the processors defined in the engine's configuration. 
Additional processors can be specified as a list of callables using the \"processors\" keyword argument.", - "type": "class", - "file_path": "django\\django\\template\\context.py", - "ast_data": "ClassDef name:RequestContext FunctionDef name:__init__ arguments arg:self arg:request arg:dict_ arg:processors arg:use_l10n arg:use_tz arg:autoescape Assign Assign Assign Call call:len FunctionDef name:bind_template arguments arg:self arg:template If Compare op:IsNot Raise raises:RuntimeError('Context is already bound to a template') Assign Assign Assign For Assign Call call:processor Try ExceptHandler Raise raises:TypeError(f\"Context processor {processor.__qualname__} didn't return a dictionary.\") Assign Try Assign Assign FunctionDef name:new arguments arg:self arg:values Assign Call call:new If Call call:hasattr Return return:yes" - }, - { - "library": "pytorch", - "name": "mask_loads", - "source_code": "@contextlib.contextmanager def mask_loads(self, mask: Union[str, OpsWrapper], value: Union[int, float]) -> Iterator[str]: prior = self._load_mask prior_val = self._load_other if prior: mask = ops.logical_and(mask, prior) mask = OpsWrapper._unwrap(mask) self._load_mask = mask self._load_other = value try: yield mask finally: self._load_mask = prior self._load_other = prior_val", - "docstring": "Context manager to add an additional mask to tl.load/store", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py", - "ast_data": "FunctionDef name:mask_loads arguments arg:self arg:mask type:Union[str, OpsWrapper] arg:value type:Union[int, float] Assign Assign If Assign Call call:logical_and Assign Call call:_unwrap Assign Assign Try Assign Assign" - }, - { - "library": "kornia", - "name": "random", - "source_code": "@classmethod def random(cls, batch_size: Optional[int] = None, device: Optional[Device] = None, dtype: Dtype = None) -> Se2: r = So2.random(batch_size, device, dtype) shape: tuple[int, ...] if batch_size is None: shape = (2,) else: KORNIA_CHECK(batch_size > = 1, msg = 'batch_size must be positive') shape = (batch_size, 2) return cls(r, Vector2(rand(shape, device = device, dtype = dtype)))", - "docstring": "Create a Se2 group representing a random transformation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. Example: >>> s = Se2.random() >>> s = Se2.random(batch_size=3)", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", - "ast_data": "FunctionDef name:random arguments arg:cls arg:batch_size type:Optional[int] arg:device type:Optional[Device] arg:dtype type:Dtype Assign Call call:random If Compare op:Is Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_radii", - "source_code": "def get_radii(self): return (self.a, self.b)", - "docstring": "Return the semi-major and semi-minor radii of the annulus.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:get_radii arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, cell): self._cell = cell", - "docstring": "Creates a new BoolGaugeCell. 
Args: cell: A c pointer of TFE_MonitoringBoolGaugeCell.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:cell Assign" - }, - { - "library": "mongo", - "name": "start_session", - "source_code": "def start_session(self, causal_consistency: Optional[bool] = None, default_transaction_options: Optional[client_session.TransactionOptions] = None, snapshot: Optional[bool] = False) -> client_session.AsyncClientSession: return self._start_session(False, causal_consistency = causal_consistency, default_transaction_options = default_transaction_options, snapshot = snapshot)", - "docstring": "Start a logical session. This method takes the same parameters as :class:. See the :mod: module for details and examples. A :class: may only be used with the AsyncMongoClient that started it. :class: instances are **not thread-safe or fork-safe**. They can only be used by one thread or process at a time. A single :class: cannot be used to run multiple operations concurrently. :return: An instance of :class:. .. versionadded:: 3.6", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\mongo_client.py", - "ast_data": "FunctionDef name:start_session arguments arg:self arg:causal_consistency type:Optional[bool] arg:default_transaction_options type:Optional[client_session.TransactionOptions] arg:snapshot type:Optional[bool] Return return:yes" - }, - { - "library": "tensorflow", - "name": "trainable_weights", - "source_code": "@property def trainable_weights(self): return self.trainable_variables", - "docstring": "List of trainable weights/variables created by the Template.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", - "ast_data": "FunctionDef name:trainable_weights arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "max_distance_rectangle", - "source_code": "def max_distance_rectangle(self, other, p = 2.0): return minkowski_distance(0, np.maximum(self.maxes - other.mins, other.maxes - self.mins), p)", - "docstring": "Compute the maximum distance between points in the two hyperrectangles. Parameters ---------- other : hyperrectangle Input. 
p : float, optional Input.", - "type": "method", - "file_path": "scipy\\scipy\\spatial\\_kdtree.py", - "ast_data": "FunctionDef name:max_distance_rectangle arguments arg:self arg:other arg:p Return return:yes" - }, - { - "library": "pytorch", - "name": "true_divide", - "source_code": "@_onnx_symbolic('aten: : true_divide') def true_divide(g: jit_utils.GraphContext, self, other): if symbolic_helper._is_fp(self) or symbolic_helper._is_fp(other): return g.op('Div', self, other) scalar_type = torch.get_default_dtype() onnx_scalar_type = _C_onnx.TensorProtoDataType.FLOAT assert scalar_type is torch.float or scalar_type is torch.double if torch.get_default_dtype() is torch.double: onnx_scalar_type = _C_onnx.TensorProtoDataType.DOUBLE self = g.op('Cast', self, to_i = onnx_scalar_type) other = g.op('Cast', other, to_i = onnx_scalar_type) return g.op('Div', self, other)", - "docstring": "Division where both inputs are cast to floating types If both inputs are floating, performs div as usual If only one input is a floating type, the other input is cast to its type If neither input is a floating type, both inputs are cast to the default scalar type", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", - "ast_data": "FunctionDef name:true_divide arguments arg:g type:jit_utils.GraphContext arg:self arg:other Call call:_onnx_symbolic If BoolOp Call call:_is_fp Call call:_is_fp Return return:yes Assign Call call:get_default_dtype Assign If Compare op:Is Assign Assign Call call:op Assign Call call:op Return return:yes" - }, - { - "library": "tensorflow", - "name": "as_str", - "source_code": "def as_str(bytes_or_text, encoding = 'utf-8'): return as_text(bytes_or_text, encoding)", - "docstring": "Acts as an alias for the function.. Args: bytes_or_text: The input value to be converted. A bytes or unicode object. encoding: Optional string. The encoding to use if bytes_or_text is a bytes object. Defaults to 'utf-8'. Returns: A unicode string. Raises: TypeError: If bytes_or_text is not a bytes or unicode object. 
UnicodeDecodeError: If bytes_or_text is a bytes object and cannot be decoded using the specified encoding.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\compat.py", - "ast_data": "FunctionDef name:as_str arguments arg:bytes_or_text arg:encoding Return return:yes" - }, - { - "library": "pytorch", - "name": "get_data", - "source_code": "def get_data(self, name: str, return_original: bool = True): if name not in self.data_groups: raise ValueError('data with specified name does not exist') if return_original: if not parametrize.is_parametrized(self._container, name): raise ValueError('mask squashed - original mask value does not exist') data = getattr(self._container.parametrizations, name).original return data else: return getattr(self._container, name)", - "docstring": "Returns weight tensor (or data) Args: - name: name of the data to be returned - return_original returns weight tensor without applying parametrization if True else - returns the sparsified version (parametrized)", - "type": "method", - "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\base_data_sparsifier.py", - "ast_data": "FunctionDef name:get_data arguments arg:self arg:name type:str arg:return_original type:bool If Compare op:NotIn Raise raises:ValueError('data with specified name does not exist') If If Raise raises:ValueError('mask squashed - original mask value does not exist') Assign Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "tanh", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def tanh(x): return nn.tanh(x)", - "docstring": "Element-wise tanh. Args: x: A tensor or variable. Returns: A tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:tanh arguments arg:x Return return:yes" - }, - { - "library": "tensorflow", - "name": "wrapper_helper", - "source_code": "def wrapper_helper(*args): nested_args = structure.from_compatible_tensor_list(self._input_structure, args) if not _should_unpack(nested_args): nested_args = (nested_args,) ret = autograph.tf_convert(self._func, ag_ctx)(*nested_args) ret = variable_utils.convert_variables_to_tensors(ret) if _should_pack(ret): ret = tuple(ret) try: self._output_structure = structure.type_spec_from_value(ret) except (ValueError, TypeError) as e: raise TypeError(f'Unsupported return value from function passed to {transformation_name}: {ret}.') from e return ret", - "docstring": "Wrapper for passing nested structures to and from tf.data functions.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\structured_function.py", - "ast_data": "FunctionDef name:wrapper_helper arguments vararg:args Assign Call call:from_compatible_tensor_list If Assign Assign Call Assign Call call:convert_variables_to_tensors If Call call:_should_pack Assign Call call:tuple Try Assign Call call:type_spec_from_value ExceptHandler Raise raises:TypeError(f'Unsupported return value from function passed to {transformation_name}: {ret}.') Return return:yes" - }, - { - "library": "seaborn", - "name": "figure", - "source_code": "@property def figure(self): return self._figure", - "docstring": "Access the :class: object underlying the grid.", - "type": "method", - "file_path": "seaborn\\seaborn\\axisgrid.py", - "ast_data": "FunctionDef name:figure arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "get_fqn_to_example_inputs", - "source_code": "def 
get_fqn_to_example_inputs(model: torch.nn.Module, example_inputs: tuple[Any, ...]) -> dict[str, tuple[Any, ...]]: root = model fqn_to_example_inputs = {} def _patched_module_call(self, *args, **kwargs): submodule_example_inputs = list(args).copy() normalized_kwargs = _normalize_kwargs(self.forward, kwargs) num_args = _get_num_pos_args(self.forward) - 1 num_to_pop = num_args - len(submodule_example_inputs) while num_to_pop and normalized_kwargs: normalized_kwargs.popitem(last = False) num_to_pop - = 1 submodule_example_inputs.extend(normalized_kwargs.values()) submodule_example_inputs_tuple = tuple(submodule_example_inputs) fqn = _get_path_of_module(root, self) if fqn is not None: fqn_to_example_inputs[fqn] = submodule_example_inputs_tuple return orig_module_call(self, *args, **kwargs) orig_module_call = torch.nn.Module.__call__ torch.nn.Module.__call__ = _patched_module_call try: model(*example_inputs) finally: torch.nn.Module.__call__ = orig_module_call return fqn_to_example_inputs", - "docstring": "Given a model and its example inputs, return a dictionary from fully qualified name of submodules to example_inputs for that submodule, e.g. {\"linear1\": (tensor1,), \"linear2\": (tensor2,), \"sub\": (tensor3,), \"sub.linear1\": (tensor4,), ...} Used to make quantizing submodules easier now that FX Graph Mode Quantization requires example inputs. Also works for keyword arguments with default values, we would flatten keyword arguments as positional arguments and fill in the missing keyword args with default values, e.g. if we have a forward function: def forward(self, x, key1=3, key2=3): ... and we call it with self.submodule(x, key2=6) we'll get example_inputs: (x, 3, 6) user can also override with positional arguments as well: for self.submodule(x, 5, key2=6) we'll get: (x, 5, 6) variable positional arguments and variable positional keyword arguments in forward function are not supported currently, so please make sure no submodules is using them.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\utils.py", - "ast_data": "FunctionDef name:get_fqn_to_example_inputs arguments arg:model type:torch.nn.Module arg:example_inputs type:tuple[Any, ...] Assign Assign FunctionDef name:_patched_module_call arguments arg:self vararg:args kwarg:kwargs Assign Call call:copy Assign Call call:_normalize_kwargs Assign Assign While BoolOp Assign Call call:tuple Assign Call call:_get_path_of_module If Compare op:IsNot Assign Return return:yes Assign Assign Try Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "report_benchmark", - "source_code": "def report_benchmark(self, iters = None, cpu_time = None, wall_time = None, throughput = None, extras = None, name = None, metrics = None): name = self._get_name(overwrite_name = name) _global_report_benchmark(name = name, iters = iters, cpu_time = cpu_time, wall_time = wall_time, throughput = throughput, extras = extras, metrics = metrics)", - "docstring": "Report a benchmark. Args: iters: (optional) How many iterations were run cpu_time: (optional) Median or mean cpu time in seconds. wall_time: (optional) Median or mean wall time in seconds. throughput: (optional) Throughput (in MB/s) extras: (optional) Dict mapping string keys to additional benchmark info. Values may be either floats or values that are convertible to strings. name: (optional) Override the BenchmarkEntry name with . Otherwise it is inferred from the top-level method name. 
metrics: (optional) A list of dict, where each dict has the keys below name (required), string, metric name value (required), double, metric value min_value (optional), double, minimum acceptable metric value max_value (optional), double, maximum acceptable metric value", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\platform\\benchmark.py", - "ast_data": "FunctionDef name:report_benchmark arguments arg:self arg:iters arg:cpu_time arg:wall_time arg:throughput arg:extras arg:name arg:metrics Assign Call call:_get_name" - }, - { - "library": "pytorch", - "name": "is_registered", - "source_code": "@property def is_registered(self): return self._registered", - "docstring": "Returns True if the execution trace observer is registered, otherwise False.", - "type": "method", - "file_path": "pytorch\\torch\\profiler\\profiler.py", - "ast_data": "FunctionDef name:is_registered arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "row_splits", - "source_code": "def row_splits(self): return self._row_splits", - "docstring": "Returns the row-split indices for this row partition. specifies where the values for each row begin and end. In particular, the values for row are stored in the slice . Returns: A 1-D integer with shape . The returned tensor is non-empty, and is sorted in ascending order. . .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", - "ast_data": "FunctionDef name:row_splits arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "__init__", - "source_code": "def __init__(self, topology: Topology, name: str, interval: int, min_interval: float): def target() -> bool: monitor = self_ref() if monitor is None: return False monitor._run() return True executor = periodic_executor.PeriodicExecutor(interval = interval, min_interval = min_interval, target = target, name = name) self._executor = executor def _on_topology_gc(dummy: Optional[Topology] = None) -> None: monitor = self_ref() if monitor: monitor.gc_safe_close() self_ref = weakref.ref(self, executor.close) self._topology = weakref.proxy(topology, _on_topology_gc) _register(self)", - "docstring": "Base class to do periodic work on a background thread. The background thread is signaled to stop when the Topology or this instance is freed.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\monitor.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:topology type:Topology arg:name type:str arg:interval type:int arg:min_interval type:float FunctionDef name:target arguments Assign Call call:self_ref If Compare op:Is Return return:yes Return return:yes Assign Call call:PeriodicExecutor Assign FunctionDef name:_on_topology_gc arguments arg:dummy type:Optional[Topology] Assign Call call:self_ref If Assign Call call:ref Assign Call call:proxy" - }, - { - "library": "pandas", - "name": "construct_array_type", - "source_code": "@classmethod def construct_array_type(cls) -> type_t[DatetimeArray]: from pandas.core.arrays import DatetimeArray return DatetimeArray", - "docstring": "Return the array type associated with this dtype. 
Returns ------- type", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", - "ast_data": "FunctionDef name:construct_array_type arguments arg:cls Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_unpartitioned_shape", - "source_code": "def get_unpartitioned_shape(self, shape): shape = tensor_shape.as_shape(shape) dims = shape.as_list() if self._shard_dimension is None or self._number_of_partitions is None or (not dims): return None if dims[self._shard_dimension] is None: raise ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known. ') if self._number_of_partitions > 1: dims[self._shard_dimension] * = self._number_of_partitions return tensor_shape.as_shape(dims)", - "docstring": "Returns the shape of an unpartitioned Tensor. When given the shape of a 'sharded-size' Tensor, returns the shape of the full shape of its unpartitioned Tensor. Args: shape: The shape of the sharded Tensor. Returns: The shape of the unpartitioned version of the Tensor. Raises: ValueError: if shape has unknown sharded dimension", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py", - "ast_data": "FunctionDef name:get_unpartitioned_shape arguments arg:self arg:shape Assign Call call:as_shape Assign Call call:as_list If BoolOp Compare op:Is Compare op:Is Return return:yes If Compare op:Is Raise raises:ValueError(f'Shape {shape.as_list()} must have a fixed size for dimension {self._shard_dimension} that is known. ') If Compare op:Gt Return return:yes" - }, - { - "library": "tensorflow", - "name": "convert_n_to_tensor_or_composite", - "source_code": "def convert_n_to_tensor_or_composite(values, dtype = None, name = None) -> list[Union[EagerTensor, SymbolicTensor, composite_tensor.CompositeTensor, type(None)]]: return internal_convert_n_to_tensor_or_composite(values = values, dtype = dtype, name = name, as_ref = False)", - "docstring": "Converts to a list of or objects. Any objects in are returned unmodified. Args: values: A list of , convert_to_tensor()DTypeTensorCompositeTensorTensoriname + '_' + iTensorCompositeTensorvalues`. 
RuntimeError: If a registered conversion function returns an invalid value.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:convert_n_to_tensor_or_composite arguments arg:values arg:dtype arg:name Return return:yes" - }, - { - "library": "pytorch", - "name": "mark_buffer_mutated", - "source_code": "def mark_buffer_mutated(self, name: str) -> None: assert isinstance(name, str) self.mutated_buffers.add(name) if name not in self.name_to_users: return for user in self.name_to_users[name]: user.realize()", - "docstring": "When a buffer is mutated we need to make sure all the reads to the old version are realized before the mutation happens.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\graph.py", - "ast_data": "FunctionDef name:mark_buffer_mutated arguments arg:self arg:name type:str If Compare op:NotIn Return return:no For" - }, - { - "library": "pytorch", - "name": "update", - "source_code": "def update(self, new_scale: Optional[Union[float, torch.Tensor]] = None) -> None: if not self._enabled: return _scale, _growth_tracker = self._check_scale_growth_tracker('update') if new_scale is not None: if isinstance(new_scale, float): self._scale.fill_(new_scale) else: reason = 'new_scale should be a float or a 1-element torch.cuda.FloatTensor or torch.FloatTensor with requires_grad = False.' assert new_scale.device.type == self._device, reason assert new_scale.numel() == 1, reason assert new_scale.requires_grad is False, reason self._scale.copy_(new_scale) else: found_infs = [found_inf.to(device = _scale.device, non_blocking = True) for state in self._per_optimizer_states.values() for found_inf in state['found_inf_per_device'].values()] assert len(found_infs) > 0, 'No inf checks were recorded prior to update.' found_inf_combined = found_infs[0] if len(found_infs) > 1: for i in range(1, len(found_infs)): found_inf_combined += found_infs[i] if _scale.device.type == 'cpu': self._amp_update_scale_cpu_(found_inf_combined) else: torch._amp_update_scale_(self._scale, self._growth_tracker, found_inf_combined, self._growth_factor, self._backoff_factor, self._growth_interval) self._per_optimizer_states = defaultdict(_refresh_per_optimizer_state)", - "docstring": "Updates the scale factor.
If any optimizer steps were skipped the scale is multiplied by backoff_factor to reduce it; otherwise, after growth_interval consecutive unskipped iterations, it is multiplied by growth_factor to increase it. update should only be called at the end of the iteration, after scaler.step(optimizer) has been invoked for all optimizers used this iteration.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\sharded_grad_scaler.py", - "ast_data": "FunctionDef name:update arguments arg:self arg:new_scale type:Optional[Union[float, torch.Tensor]] If Return return:no Assign Call call:_check_scale_growth_tracker If Compare op:IsNot If Call call:isinstance Assign Assign Assign If Compare op:Gt For Call call:range If Compare op:Eq Assign Call call:defaultdict" - }, - { - "library": "sphinx", - "name": "find_autosummary_in_docstring", - "source_code": "def find_autosummary_in_docstring(name: str, filename: str | os.PathLike[str] | None = None) -> list[AutosummaryEntry]: try: _real_name, obj, _parent, _modname = import_by_name(name) lines = pydoc.getdoc(obj).splitlines() return find_autosummary_in_lines(lines, module = name, filename = filename) except AttributeError: pass except ImportExceptionGroup as exc: errors = '\\n'.join({f'* {type(e).__name__}: {e}' for e in exc.exceptions}) logger.warning(f'Failed to import {name}.\\nPossible hints: \\n{errors}') except SystemExit: logger.warning("Failed to import '%s'; the module executes module level statement and it might call sys.exit().", name) return []", - "docstring": "Find out what items are documented in the given object's docstring. See .", - "type": "function", - "file_path": "sphinx\\sphinx\\ext\\autosummary\\generate.py", - "ast_data": "FunctionDef name:find_autosummary_in_docstring arguments arg:name type:str arg:filename type:str | os.PathLike[str] | None Try Assign Call call:import_by_name Assign Call call:splitlines Return return:yes ExceptHandler ExceptHandler Assign Call call:join ExceptHandler Return return:yes" - }, - { - "library": "cherrypy", - "name": "flatten", - "source_code": "def flatten(debug = False): def flattener(input): numchunks = 0 for x in input: if not is_iterator(x): numchunks += 1 yield x else: for y in flattener(x): numchunks += 1 yield y if debug: cherrypy.log('Flattened %d chunks' % numchunks, 'TOOLS.FLATTEN') response = cherrypy.serving.response response.body = flattener(response.body)", - "docstring": "Wrap response.body in a generator that recursively iterates over body.
This allows cherrypy.response.body to consist of 'nested generators'; that is, a set of generators that yield generators.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", - "ast_data": "FunctionDef name:flatten arguments arg:debug FunctionDef name:flattener arguments arg:input Assign For If For Call call:flattener If Assign Assign Call call:flattener" - }, - { - "library": "matplotlib", - "name": "pstoeps", - "source_code": "def pstoeps(tmpfile, bbox = None, rotated = False): epsfile = tmpfile + '.eps' with open(epsfile, 'wb') as epsh, open(tmpfile, 'rb') as tmph: write = epsh.write for line in tmph: if line.startswith(b'%!PS'): write(b'%!PS-Adobe-3.0 EPSF-3.0\\n') if bbox: write(_get_bbox_header(bbox).encode('ascii') + b'\\n') elif line.startswith(b'%%EndComments'): write(line) write(b'%%BeginProlog\\nsave\\ncountdictstack\\nmark\\nnewpath\\n/showpage {} def\\n/setpagedevice {pop} def\\n%%EndProlog\\n%%Page 1 1\\n') if rotated: write(_get_rotate_command(bbox).encode('ascii') + b'\\n') break elif bbox and line.startswith((b'%%Bound', b'%%HiResBound', b'%%DocumentMedia', b'%%Pages')): pass else: write(line) for line in tmph: if line.startswith(b'%%EOF'): write(b'cleartomark\\ncountdictstack\\nexch sub { end } repeat\\nrestore\\nshowpage\\n%%EOF\\n') elif line.startswith(b'%%PageBoundingBox'): pass else: write(line) os.remove(tmpfile) shutil.move(epsfile, tmpfile)", - "docstring": "Convert the postscript to encapsulated postscript. The bbox of the eps file will be replaced with the given *bbox* argument. If None, original bbox will be used.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_ps.py", - "ast_data": "FunctionDef name:pstoeps arguments arg:tmpfile arg:bbox arg:rotated Assign With Assign For If Call call:startswith If If Call call:startswith If If BoolOp Call call:startswith For If Call call:startswith If Call call:startswith" - }, - { - "library": "tensorflow", - "name": "experimental_type_proto", - "source_code": "@classmethod def experimental_type_proto(cls) -> Type[tensor_shape_pb2.TensorShapeProto]: return tensor_shape_pb2.TensorShapeProto", - "docstring": "Returns the type of proto associated with TensorShape serialization.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", - "ast_data": "FunctionDef name:experimental_type_proto arguments arg:cls Return return:yes" - }, - { - "library": "pytorch", - "name": "map_nodes_to_values", - "source_code": "@compatibility(is_backward_compatible = True) def map_nodes_to_values(self, args: Argument, n: Node) -> Argument: def load_arg(n_arg: Node) -> Any: if n_arg not in self.env: raise RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! Run Graph.lint() to diagnose such issues') return self.env[n_arg] return map_arg(args, load_arg)", - "docstring": "Recursively descend through `` belongs. This is only used for error reporting.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\interpreter.py", - "ast_data": "FunctionDef name:map_nodes_to_values arguments arg:self arg:args type:Argument arg:n type:Node Call call:compatibility FunctionDef name:load_arg arguments arg:n_arg type:Node If Compare op:NotIn Raise raises:RuntimeError(f'Node {n} referenced nonexistent value {n_arg}! 
Run Graph.lint() to diagnose such issues') Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "batch", - "source_code": "def batch(self, spec, batch_size): def batch_field(f): if isinstance(f, type_spec.BatchableTypeSpec): return f.__batch_encoder__.batch(f, batch_size) elif isinstance(f, tensor_shape.TensorShape): return [batch_size] + f else: return f fields = tuple(spec.__dict__.items()) batched_fields = nest.map_structure(batch_field, fields) return _create_object_from_type_and_dict(type(spec), batched_fields)", - "docstring": "Returns the TypeSpec representing a batch of values described by . The default definition returns a that is equal to , except that an outer axis with size is added to every nested and field. Subclasses may override this default definition, when necessary. Args: spec: The for an individual value. batch_size: An indicating the number of values that are batched together, or if the batch size is not known. Returns: A for a batch of values.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", - "ast_data": "FunctionDef name:batch arguments arg:self arg:spec arg:batch_size FunctionDef name:batch_field arguments arg:f If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Return return:yes Assign Call call:tuple Assign Call call:map_structure Return return:yes" - }, - { - "library": "django", - "name": "paragraph", - "source_code": "def paragraph(): return ' '.join((sentence() for i in range(random.randint(1, 4))))", - "docstring": "Return a randomly generated paragraph of lorem ipsum text. The paragraph consists of between 1 and 4 sentences, inclusive.", - "type": "function", - "file_path": "django\\django\\utils\\lorem_ipsum.py", - "ast_data": "FunctionDef name:paragraph arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "__str__", - "source_code": "def __str__(self) -> str: args: list[str] = [] for field in dataclasses.fields(self): if field.name == 'wrapper': continue val = getattr(self, field.name) args.append(f'{field.name} = {(val.get_name() if field.type is ir.Buffer else val)}') return f'{type(self).__name__}({', '.join(args)})'", - "docstring": "Emits a string representation that fits on one line.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py", - "ast_data": "FunctionDef name:__str__ arguments arg:self For Call call:fields If Compare op:Eq Assign Call call:getattr Return return:yes" - }, - { - "library": "tensorflow", - "name": "make_gradient_clipnorm_fn", - "source_code": "def make_gradient_clipnorm_fn(clipnorm): if clipnorm is None: return lambda grads_and_vars: grads_and_vars def gradient_clipnorm_fn(grads_and_vars): if isinstance(distribute_lib.get_strategy(), (central_storage_strategy.CentralStorageStrategy, central_storage_strategy.CentralStorageStrategyV1)): raise ValueError('`clipnorm` is not supported with `CenteralStorageStrategy`') clipped_grads_and_vars = [(clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars] return clipped_grads_and_vars return gradient_clipnorm_fn", - "docstring": "Creates a gradient transformation function for clipping by norm.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\utils.py", - "ast_data": "FunctionDef name:make_gradient_clipnorm_fn arguments arg:clipnorm If Compare op:Is Return return:yes FunctionDef name:gradient_clipnorm_fn arguments arg:grads_and_vars If Call call:isinstance Raise
raises:ValueError('`clipnorm` is not supported with `CenteralStorageStrategy`') Assign Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "full_name_node", - "source_code": "def full_name_node(name, ctx = ast.Load()): names = name.split('.') names.reverse() node = ast.Name(id = names.pop(), ctx = ast.Load()) while names: node = ast.Attribute(value = node, attr = names.pop(), ctx = ast.Load()) node.ctx = ctx return node", - "docstring": "Make an Attribute or Name node for name. Translate a qualified name into nested Attribute nodes (and a Name node). Args: name: The name to translate to a node. ctx: What context this name is used in. Defaults to Load() Returns: A Name or Attribute node.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", - "ast_data": "FunctionDef name:full_name_node arguments arg:name arg:ctx Assign Call call:split Assign Call call:Name While Assign Call call:Attribute Assign Return return:yes" - }, - { - "library": "numpy", - "name": "get_printoptions", - "source_code": "@set_module('numpy') def get_printoptions(): opts = format_options.get().copy() opts['legacy'] = {113: '1.13', 121: '1.21', 125: '1.25', 201: '2.1', 202: '2.2', sys.maxsize: False}[opts['legacy']] return opts", - "docstring": "Return the current print options. Returns ------- print_opts : dict Dictionary of current print options with keys - precision : int - threshold : int - edgeitems : int - linewidth : int - suppress : bool - nanstr : str - infstr : str - sign : str - formatter : dict of callables - floatmode : str - legacy : str or False For a full description of these options, see . See Also -------- set_printoptions, printoptions Examples -------- >>> import numpy as np >>> np.get_printoptions() {'edgeitems': 3, 'threshold': 1000, ..., 'override_repr': None} >>> np.get_printoptions()['linewidth'] 75 >>> np.set_printoptions(linewidth=100) >>> np.get_printoptions()['linewidth'] 100", - "type": "function", - "file_path": "numpy\\numpy\\_core\\arrayprint.py", - "ast_data": "FunctionDef name:get_printoptions arguments Call call:set_module Assign Call call:copy Assign Return return:yes" - }, - { - "library": "cryptography", - "name": "rsa_recover_private_exponent", - "source_code": "def rsa_recover_private_exponent(e: int, p: int, q: int) -> int: lambda_n = (p - 1) * (q - 1) // gcd(p - 1, q - 1) return _modinv(e, lambda_n)", - "docstring": "Compute the RSA private_exponent (d) given the public exponent (e) and the RSA primes p and q. This uses the Carmichael totient function to generate the smallest possible working value of the private exponent.", - "type": "function", - "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\rsa.py", - "ast_data": "FunctionDef name:rsa_recover_private_exponent arguments arg:e type:int arg:p type:int arg:q type:int Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "RegressorTags", - "source_code": "@dataclass(slots = True) class RegressorTags: poor_score: bool = False", - "docstring": "Tags for the regressor. Parameters ---------- poor_score : bool, default=False Whether the estimator fails to provide a \"reasonable\" test-set score, which currently for regression is an R2 of 0.5 on ``. 
The dataset and values are based on current estimators in scikit-learn and might be replaced by something more systematic.", - "type": "class", - "file_path": "scikit-learn\\sklearn\\utils\\_tags.py", - "ast_data": "ClassDef name:RegressorTags Call call:dataclass" - }, - { - "library": "numpy", - "name": "hermeadd", - "source_code": "def hermeadd(c1, c2): return pu._add(c1, c2)", - "docstring": "Add one Hermite series to another. Returns the sum of two Hermite series + . The arguments are sequences of coefficients ordered from lowest order term to highest, i.e., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Array representing the Hermite series of their sum. See Also -------- hermesub, hermemulx, hermemul, hermediv, hermepow Notes ----- Unlike multiplication, division, etc., the sum of two Hermite series is a Hermite series (without having to "reproject" the result onto the basis set) so addition, just like that of "standard" polynomials, is simply "component-wise." Examples -------- >>> from numpy.polynomial.hermite_e import hermeadd >>> hermeadd([1, 2, 3], [1, 2, 3, 4]) array([2., 4., 6., 4.])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", - "ast_data": "FunctionDef name:hermeadd arguments arg:c1 arg:c2 Return return:yes" - }, - { - "library": "django", - "name": "srid", - "source_code": "@property def srid(self): s = capi.geos_get_srid(self.ptr) if s == 0: return None else: return s", - "docstring": "Get the SRID for the geometry. Return None if no SRID is set.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:srid arguments arg:self Assign Call call:geos_get_srid If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "@deprecation.deprecated(None, 'use distribute.MultiWorkerMirroredStrategy instead') def __init__(self, communication = collective_util.CommunicationImplementation.AUTO, cluster_resolver = None): communication_options = collective_util.Options(implementation = communication) super(_CollectiveAllReduceStrategyExperimental, self).__init__(cluster_resolver, communication_options)", - "docstring": "Creates the strategy. Args: communication: optional . This is a hint on the preferred collective communication implementation. Possible values include , , and . cluster_resolver: optional .
If , is used.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_all_reduce_strategy.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:communication arg:cluster_resolver Call call:deprecated Assign Call call:Options" - }, - { - "library": "tensorflow", - "name": "GlobalStepWaiterHook", - "source_code": "@tf_export(v1 = ['train.GlobalStepWaiterHook']) class GlobalStepWaiterHook(session_run_hook.SessionRunHook): def __init__(self, wait_until_step): self._wait_until_step = wait_until_step def begin(self): self._worker_is_started = False self._global_step_tensor = training_util._get_or_create_global_step_read() if self._global_step_tensor is None: raise RuntimeError('Global step should be created to use _GlobalStepWaiterHook.') def before_run(self, run_context): if self._worker_is_started: return None if self._wait_until_step < = 0: self._worker_is_started = True return None logging.info('Waiting for global step %d before starting training.', self._wait_until_step) last_logged_step = 0 while True: current_step = run_context.session.run(self._global_step_tensor) if current_step > = self._wait_until_step: self._worker_is_started = True return None if current_step - last_logged_step > 1000: logging.info('Waiting for global step %d before starting training. Current step is %d.', self._wait_until_step, current_step) last_logged_step = current_step time.sleep(0.5)", - "docstring": "Delays execution until global step reaches . This hook delays execution until global step reaches to . It is used to gradually start workers in distributed settings. One example usage would be setting assuming that task_id=0 is the chief.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\training\\basic_session_run_hooks.py", - "ast_data": "ClassDef name:GlobalStepWaiterHook Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:wait_until_step Assign FunctionDef name:begin arguments arg:self Assign Assign Call call:_get_or_create_global_step_read If Compare op:Is Raise raises:RuntimeError('Global step should be created to use _GlobalStepWaiterHook.') FunctionDef name:before_run arguments arg:self arg:run_context If Return return:yes If Compare op:LtE Assign Return return:yes Assign While Assign Call call:run If Compare op:GtE Assign Return return:yes If Compare op:Gt Assign" - }, - { - "library": "pytorch", - "name": "get_device_module", - "source_code": "@functools.cache def get_device_module(device: _Optional[_Union[torch.device, str]] = None): if isinstance(device, torch.device): device_module_name = device.type elif isinstance(device, str): device_module_name = torch.device(device).type elif device is None: device_module_name = torch._C._get_accelerator().type else: raise RuntimeError(f\"Invalid value of device '{device}', expect torch.device, str, or None\") device_module = getattr(torch, device_module_name, None) if device_module is None: raise RuntimeError(f\"Device '{device_module_name}' does not have a corresponding module registered as 'torch.{device_module_name}'.\") return device_module", - "docstring": "Returns the module associated with a given device(e.g., torch.device('cuda'), \"mtia:0\", \"xpu\", ...). 
If no device is given, return the module for the current accelerator or CPU if none is present.", - "type": "function", - "file_path": "pytorch\\torch\\__init__.py", - "ast_data": "FunctionDef name:get_device_module arguments arg:device type:_Optional[_Union[torch.device, str]] If Call call:isinstance Assign If Call call:isinstance Assign If Compare op:Is Assign Raise raises:RuntimeError(f"Invalid value of device '{device}', expect torch.device, str, or None") Assign Call call:getattr If Compare op:Is Raise raises:RuntimeError(f"Device '{device_module_name}' does not have a corresponding module registered as 'torch.{device_module_name}'.") Return return:yes" - }, - { - "library": "matplotlib", - "name": "clabel", - "source_code": "def clabel(self, *args, **kwargs): return None", - "docstring": "Currently not implemented for 3D Axes, and returns *None*.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", - "ast_data": "FunctionDef name:clabel arguments arg:self vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "tensorflow", - "name": "spatial_2d_padding", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def spatial_2d_padding(x, padding = ((1, 1), (1, 1)), data_format = None): assert len(padding) == 2 assert len(padding[0]) == 2 assert len(padding[1]) == 2 if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format: ' + str(data_format)) if data_format == 'channels_first': pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])] else: pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]] return array_ops.pad(x, pattern)", - "docstring": "Pads the 2nd and 3rd dimensions of a 4D tensor. Args: x: Tensor or variable. padding: Tuple of 2 tuples, padding pattern. data_format: One of or . Returns: A padded 4D tensor. Raises: ValueError: if is neither or .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:spatial_2d_padding arguments arg:x arg:padding arg:data_format If Compare op:Is Assign Call call:image_data_format If Compare op:NotIn Raise raises:ValueError('Unknown data_format: ' + str(data_format)) If Compare op:Eq Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "KORNIA_CHECK_IS_IMAGE", - "source_code": "def KORNIA_CHECK_IS_IMAGE(x: Tensor, msg: Optional[str] = None, raises: bool = True, bits: int = 8) -> bool: if not raises and (not KORNIA_CHECK_IS_COLOR_OR_GRAY(x, msg, raises)): return False min_val, max_val = (x.min(), x.max()) if x.dtype in [float16, float32, float64]: if min_val < 0.0 or max_val > 1.0: return _handle_invalid_range(msg, raises, min_val, max_val) else: max_int_value = 2 ** bits - 1 if min_val < 0 or max_val > max_int_value: return _handle_invalid_range(msg, raises, min_val, max_val) return True", - "docstring": "Check whether an image tensor is ranged properly [0, 1] for float or [0, 2 ** bits] for int. Args: x: image tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. bits: the image bits. The default checks if given integer input image is an 8-bit image (0-255) or not. Raises: TypeException: if all the input tensor has not 1) a shape :math:, 2) [0, 1] for float or [0, 255] for int, 3) and raises is True.
Example: >>> img = torch.rand(2, 3, 4, 4) >>> KORNIA_CHECK_IS_IMAGE(img, "It is not an image") True", - "type": "function", - "file_path": "kornia\\kornia\\core\\check.py", - "ast_data": "FunctionDef name:KORNIA_CHECK_IS_IMAGE arguments arg:x type:Tensor arg:msg type:Optional[str] arg:raises type:bool arg:bits type:int If BoolOp Return return:yes Assign If Compare op:In If BoolOp Compare op:Lt Compare op:Gt Return return:yes Assign If BoolOp Compare op:Lt Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "authlib", - "name": "deserialize", - "source_code": "def deserialize(self, obj, key, decode = None, sender_key = None): if isinstance(obj, dict): return self.deserialize_json(obj, key, decode, sender_key) obj = to_bytes(obj) if obj.startswith(b'{') and obj.endswith(b'}'): return self.deserialize_json(obj, key, decode, sender_key) return self.deserialize_compact(obj, key, decode, sender_key)", - "docstring": "Extract a JWE Serialization. It supports both compact and JSON serialization. :param obj: JWE compact serialization as bytes or JWE JSON serialization as dict or str :param key: Private key used to decrypt payload (optionally can be a tuple of kid and essentially key) :param decode: Function to decode payload data :param sender_key: Sender's public key in case JWEAlgorithmWithTagAwareKeyAgreement is used :return: dict with and keys", - "type": "method", - "file_path": "authlib\\authlib\\jose\\rfc7516\\jwe.py", - "ast_data": "FunctionDef name:deserialize arguments arg:self arg:obj arg:key arg:decode arg:sender_key If Call call:isinstance Return return:yes Assign Call call:to_bytes If BoolOp Call call:startswith Call call:endswith Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "dump_to_minify", - "source_code": "def dump_to_minify(exported_program: ExportedProgram, compiler_name: str, command: str = 'minify', options: Optional[dict[str, Any]] = None): assert command in ['minify', 'run'] subdir = os.path.join(minifier_dir(), 'checkpoints') if not os.path.exists(subdir): os.makedirs(subdir, exist_ok = True) if command == 'minify': out = io.StringIO() save_graph_repro_ep(out, compiler_name, exported_program = exported_program, save_dir = subdir, command = 'minify', config_patches = options) return helper_for_dump_minify(out.getvalue()) else: curdir = os.getcwd() file_name = os.path.join(curdir, 'repro.py') try: with open(file_name, 'w') as fd: save_graph_repro_ep(fd, compiler_name, exported_program = exported_program, config_patches = options, save_dir = subdir, command = 'run', module_in_comment = True) log.warning('Writing repro file to %s', file_name) if use_buck: BuckTargetWriter(file_name).write() except OSError: log.warning('No write permissions for %s', file_name)", - "docstring": "If command is "minify": Dump exported_program to , with minify command.
If command is \"run\": Dump exported_program to , with run command.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\repro\\aoti.py", - "ast_data": "FunctionDef name:dump_to_minify arguments arg:exported_program type:ExportedProgram arg:compiler_name type:str arg:command type:str arg:options type:Optional[dict[str, Any]] Assign Call call:join If If Compare op:Eq Assign Call call:StringIO Return return:yes Assign Call call:getcwd Assign Call call:join Try With If ExceptHandler" - }, - { - "library": "pytorch", - "name": "is_exception_branch", - "source_code": "def is_exception_branch(branch: str) -> bool: return branch.split('/')[0] in {'main', 'nightly', 'release', 'landchecks'}", - "docstring": "Branches that get opted out of experiments by default, until they're explicitly enabled.", - "type": "function", - "file_path": "pytorch\\.github\\scripts\\runner_determinator.py", - "ast_data": "FunctionDef name:is_exception_branch arguments arg:branch type:str Return return:yes" - }, - { - "library": "coconut", - "name": "add_coconut_to_path", - "source_code": "def add_coconut_to_path(): try: import coconut except ImportError: sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))", - "docstring": "Adds coconut to sys.path if it isn't there already.", - "type": "function", - "file_path": "coconut\\coconut\\main.py", - "ast_data": "FunctionDef name:add_coconut_to_path arguments Try ExceptHandler" - }, - { - "library": "pytorch", - "name": "flatten_graph_inputs", - "source_code": "def flatten_graph_inputs(gm: torch.fx.GraphModule, inputs, compile_gm): inputs_idx_to_clear = [i for i, node in enumerate(gm.graph.nodes) if node.op = = 'placeholder' and node.meta.get('steal_arg', False)] if torch._dynamo.compiled_autograd.in_compiled_autograd_region: assert inputs_idx_to_clear = = [0] assert isinstance(inputs[0], list) boxed_inputs_count = len(inputs[0]) def flatten_fn(args): return args[0] + list(args[1:]) def unflatten_fn(flat_args): return (flat_args[: boxed_inputs_count], *flat_args[boxed_inputs_count:]) compiled_fn = compile_gm(GmWrapper(gm, unflatten_fn), flatten_fn(inputs)) else: flat_inputs, spec = pytree.tree_flatten(inputs) unflatten_fn = functools.partial(pytree.tree_unflatten, treespec = spec) compiled_fn = compile_gm(GmWrapper(gm, unflatten_fn), flat_inputs) flatten_fn = pytree.arg_tree_leaves def wrapper(*args): flat_args = flatten_fn(args) for i in inputs_idx_to_clear: args[i].clear() return compiled_fn(flat_args) return wrapper", - "docstring": "Mutate inputs so that they are flat and wrap gm such that it accepts those inputs. 
This is needed for graphs that take bumpy inputs.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\utils.py", - "ast_data": "FunctionDef name:flatten_graph_inputs arguments arg:gm type:torch.fx.GraphModule arg:inputs arg:compile_gm Assign If Assign Call call:len FunctionDef name:flatten_fn arguments arg:args Return return:yes FunctionDef name:unflatten_fn arguments arg:flat_args Return return:yes Assign Call call:compile_gm Assign Call call:tree_flatten Assign Call call:partial Assign Call call:compile_gm Assign FunctionDef name:wrapper arguments vararg:args Assign Call call:flatten_fn For Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "drop_collection", - "source_code": "@_csot.apply def drop_collection(self, name_or_collection: Union[str, Collection[_DocumentTypeArg]], session: Optional[ClientSession] = None, comment: Optional[Any] = None, encrypted_fields: Optional[Mapping[str, Any]] = None) -> dict[str, Any]: name = name_or_collection if isinstance(name, Collection): name = name.name if not isinstance(name, str): raise TypeError(f'name_or_collection must be an instance of str, not {type(name)}') encrypted_fields = self._get_encrypted_fields({'encryptedFields': encrypted_fields}, name, True) if encrypted_fields: common.validate_is_mapping('encrypted_fields', encrypted_fields) self._drop_helper(_esc_coll_name(encrypted_fields, name), session = session, comment = comment) self._drop_helper(_ecoc_coll_name(encrypted_fields, name), session = session, comment = comment) return self._drop_helper(name, session, comment)", - "docstring": "Drop a collection. :param name_or_collection: the name of a collection to drop or the collection object itself :param session: a :class:. :param comment: A user-provided comment to attach to this command. :param encrypted_fields: **(BETA)** Document that describes the encrypted fields for Queryable Encryption. For example:: { \"escCollection\": \"enxcol_.encryptedCollection.esc\", \"ecocCollection\": \"enxcol_.encryptedCollection.ecoc\", \"fields\": [ { \"path\": \"firstName\", \"keyId\": Binary.from_uuid(UUID('00000000-0000-0000-0000-000000000000')), \"bsonType\": \"string\", \"queries\": {\"queryType\": \"equality\"} }, { \"path\": \"ssn\", \"keyId\": Binary.from_uuid(UUID('04104104-1041-0410-4104-104104104104')), \"bsonType\": \"string\" } ] } .. note:: The :attr: of this database is automatically applied to this operation. .. versionchanged:: 4.2 Added `` parameter. .. versionchanged:: 3.4 Apply this database's write concern automatically to this operation when connected to MongoDB >= 3.4.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\database.py", - "ast_data": "FunctionDef name:drop_collection arguments arg:self arg:name_or_collection type:Union[str, Collection[_DocumentTypeArg]] arg:session type:Optional[ClientSession] arg:comment type:Optional[Any] arg:encrypted_fields type:Optional[Mapping[str, Any]] Assign If Call call:isinstance Assign If Raise raises:TypeError(f'name_or_collection must be an instance of str, not {type(name)}') Assign Call call:_get_encrypted_fields If Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_input_details", - "source_code": "def get_input_details(self): return [self._get_tensor_details(i, subgraph_index = 0) for i in self._interpreter.InputIndices()]", - "docstring": "Gets model input tensor details. Returns: A list in which each item is a dictionary with details about an input tensor. 
Each dictionary contains the following fields that describe the tensor: + : The tensor name. + : The tensor index in the interpreter. + : The shape of the tensor. + : Same as for models with known/fixed shapes. If any dimension sizes are unknown, they are indicated with . + : The numpy data type (such as or ). + : Deprecated, use . This field only works for per-tensor quantization, whereas works in all cases. + : A dictionary of parameters used to quantize the tensor: ~ : List of scales (one if per-tensor quantization). ~ : List of zero_points (one if per-tensor quantization). ~ : Specifies the dimension of per-axis quantization, in the case of multiple scales/zero_points. + : A dictionary of parameters used to encode a sparse tensor. This is empty if the tensor is dense.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", - "ast_data": "FunctionDef name:get_input_details arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "add_onnx_graph", - "source_code": "def add_onnx_graph(self, graph, walltime = None): event = event_pb2.Event(graph_def = graph.SerializeToString()) self.add_event(event, None, walltime)", - "docstring": "Add a protocol buffer to the event file. Args: graph: A protocol buffer. walltime: float. Optional walltime to override the default (current) _get_file_writerfrom time.time())", - "type": "method", - "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", - "ast_data": "FunctionDef name:add_onnx_graph arguments arg:self arg:graph arg:walltime Assign Call call:Event" - }, - { - "library": "scipy", - "name": "check_answer", - "source_code": "def check_answer(self, x, ftol): if self.lb is not None and np.any(x < self.lb) or (self.ub is not None and np.any(x > self.ub)): return False f = np.sum(self.fun(x) ** 2) return f < (1 + ftol) * self.fopt", - "docstring": "Check if yields the objective value close enough to the optimal value. Parameters ---------- x : ndarray, shape (n,) The point to test. ftol : float Maximum allowed relative error in the objective function value. Returns ------- bool Whether is optimal enough. If violates bounds constraints then False is returned.", - "type": "method", - "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py", - "ast_data": "FunctionDef name:check_answer arguments arg:self arg:x arg:ftol If BoolOp BoolOp Compare op:IsNot Call call:any BoolOp Compare op:IsNot Call call:any Return return:yes Assign Call call:sum Return return:yes" - }, - { - "library": "tensorflow", - "name": "convert", - "source_code": "@_export_metrics def convert(self): return super(TFLiteSavedModelConverter, self).convert()", - "docstring": "Converts a TensorFlow GraphDef based on instance variables. Note that in the converted TensorFlow Lite model, the input tensor's order might be changed each time is called. To access input tensor information, please consider using the API (). Returns: The converted data in serialized format, either a TFLite Flatbuffer or a Graphviz graph depending on value in . Raises: ValueError: Input shape is not specified. 
None value for dimension in input_tensor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", - "ast_data": "FunctionDef name:convert arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "change_D", - "source_code": "def change_D(D, order, factor): R = compute_R(order, factor) U = compute_R(order, 1) RU = R.dot(U) D[: order + 1] = np.dot(RU.T, D[: order + 1])", - "docstring": "Change differences array in-place when step size is changed.", - "type": "function", - "file_path": "scipy\\scipy\\integrate\\_ivp\\bdf.py", - "ast_data": "FunctionDef name:change_D arguments arg:D arg:order arg:factor Assign Call call:compute_R Assign Call call:compute_R Assign Call call:dot Assign Call call:dot" - }, - { - "library": "tensorflow", - "name": "get_checkpoint_mtimes", - "source_code": "@deprecation.deprecated(date = None, instructions = 'Use standard file utilities to get mtimes.') @tf_export(v1 = ['train.get_checkpoint_mtimes']) def get_checkpoint_mtimes(checkpoint_prefixes): mtimes = [] def match_maybe_append(pathname): fnames = file_io.get_matching_files(pathname) if fnames: mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1000000000.0) return True return False for checkpoint_prefix in checkpoint_prefixes: pathname = _prefix_to_checkpoint_path(checkpoint_prefix, saver_pb2.SaverDef.V2) if match_maybe_append(pathname): continue match_maybe_append(checkpoint_prefix) return mtimes", - "docstring": "Returns the mtimes (modification timestamps) of the checkpoints. Globs for the checkpoints pointed to by . If the files exist, collect their mtime. Both V2 and V1 checkpoints are considered, in that priority. This is the recommended way to get the mtimes, since it takes into account the naming difference between V1 and V2 formats. Note: If not all checkpoints exist, the length of the returned mtimes list will be smaller than the length of list, so mapping checkpoints to corresponding mtimes will not be possible. Args: checkpoint_prefixes: a list of checkpoint paths, typically the results of or those of , regardless of sharded/non-sharded or V1/V2. 
Returns: A list of mtimes (in microseconds) of the found checkpoints.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint_management.py", - "ast_data": "FunctionDef name:get_checkpoint_mtimes arguments arg:checkpoint_prefixes Call call:deprecated Call call:tf_export Assign FunctionDef name:match_maybe_append arguments arg:pathname Assign Call call:get_matching_files If Return return:yes Return return:yes For Assign Call call:_prefix_to_checkpoint_path If Call call:match_maybe_append Return return:yes" - }, - { - "library": "tensorflow", - "name": "trainable_variables", - "source_code": "@property def trainable_variables(self): return tuple(self._func_graph.trainable_variables)", - "docstring": "Sequence of trainable variables for this function.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", - "ast_data": "FunctionDef name:trainable_variables arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "map_array", - "source_code": "def map_array(arr: ArrayLike, mapper, na_action: Literal['ignore'] | None = None) -> np.ndarray | ExtensionArray | Index: from pandas import Index if na_action not in (None, 'ignore'): msg = f"na_action must either be 'ignore' or None, {na_action} was passed" raise ValueError(msg) if is_dict_like(mapper): if isinstance(mapper, dict) and hasattr(mapper, '__missing__'): dict_with_default = mapper mapper = lambda x: dict_with_default[np.nan if isinstance(x, float) and np.isnan(x) else x] else: from pandas import Series if len(mapper) == 0: mapper = Series(mapper, dtype = np.float64) elif isinstance(mapper, dict): mapper = Series(mapper.values(), index = Index(mapper.keys(), tupleize_cols = False)) else: mapper = Series(mapper) if isinstance(mapper, ABCSeries): if na_action == 'ignore': mapper = mapper[mapper.index.notna()] indexer = mapper.index.get_indexer(arr) new_values = take_nd(mapper._values, indexer) return new_values if not len(arr): return arr.copy() values = arr.astype(object, copy = False) if na_action is None: return lib.map_infer(values, mapper) else: return lib.map_infer_mask(values, mapper, mask = isna(values).view(np.uint8))", - "docstring": "Map values using an input mapping or function. Parameters ---------- mapper : function, dict, or Series Mapping correspondence. na_action : {None, 'ignore'}, default None If 'ignore', propagate NA values, without passing them to the mapping correspondence. Returns ------- Union[ndarray, Index, ExtensionArray] The output of the mapping function applied to the array.
If the function returns a tuple with more than one element a MultiIndex will be returned.", - "type": "function", - "file_path": "pandas\\pandas\\core\\algorithms.py", - "ast_data": "FunctionDef name:map_array arguments arg:arr type:ArrayLike arg:mapper arg:na_action type:Literal['ignore'] | None If Compare op:NotIn Assign Raise raises:ValueError(msg) If Call call:is_dict_like If BoolOp Call call:isinstance Call call:hasattr Assign Assign If Compare op:Eq Assign Call call:Series If Call call:isinstance Assign Call call:Series Assign Call call:Series If Call call:isinstance If Compare op:Eq Assign Assign Call call:get_indexer Assign Call call:take_nd Return return:yes If Return return:yes Assign Call call:astype If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "def __call__(self, x): return 0.0", - "docstring": "Compute a regularization penalty from an input tensor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\regularizers.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:x Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_slot_names", - "source_code": "def get_slot_names(self, *args, **kwargs): return self._opt.get_slot_names(*args, **kwargs)", - "docstring": "Return a list of the names of slots created by the . This simply wraps the get_slot_names() from the actual optimizer. Args: *args: Arguments for get_slot(). **kwargs: Keyword arguments for get_slot(). Returns: A list of strings.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_optimizer.py", - "ast_data": "FunctionDef name:get_slot_names arguments arg:self vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "tensorflow", - "name": "match_filenames_once", - "source_code": "@tf_export('io.match_filenames_once', v1 = ['io.match_filenames_once', 'train.match_filenames_once']) @deprecation.deprecated_endpoints('train.match_filenames_once') def match_filenames_once(pattern, name = None): with ops.name_scope(name, 'matching_filenames', [pattern]) as name: return variable_v1.VariableV1(name = name, initial_value = io_ops.matching_files(pattern), trainable = False, validate_shape = False, collections = [ops.GraphKeys.LOCAL_VARIABLES])", - "docstring": "Save the list of files matching pattern, so it is only computed once. NOTE: The order of the files returned is deterministic. Args: pattern: A file pattern (glob), or 1D tensor of file patterns. name: A name for the operations (optional). 
Returns: A variable that is initialized to the list of files matching the pattern(s).", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", - "ast_data": "FunctionDef name:match_filenames_once arguments arg:pattern arg:name Call call:tf_export Call call:deprecated_endpoints With Return return:yes" - }, - { - "library": "flexx", - "name": "get_connections", - "source_code": "def get_connections(self, name): _, pending, connected = self._appinfo[name] return list(connected)", - "docstring": "Given an app name, return the connected session objects.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_app.py", - "ast_data": "FunctionDef name:get_connections arguments arg:self arg:name Assign Return return:yes" - }, - { - "library": "numpy", - "name": "dos2unix", - "source_code": "def dos2unix(file): if os.path.isdir(file): print(file, 'Directory!') return with open(file, 'rb') as fp: data = fp.read() if '\\x00' in data: print(file, 'Binary!') return newdata = re.sub('\\r\\n', '\\n', data) if newdata != data: print('dos2unix: ', file) with open(file, 'wb') as f: f.write(newdata) return file else: print(file, 'ok')", - "docstring": "Replace CRLF with LF in argument files. Print names of changed files.", - "type": "function", - "file_path": "numpy\\numpy\\distutils\\line_endings.py", - "ast_data": "FunctionDef name:dos2unix arguments arg:file If Call call:isdir Return return:no With Assign Call call:read If Compare op:In Return return:no Assign Call call:sub If Compare op:NotEq With Return return:yes" - }, - { - "library": "scipy", - "name": "boxcar", - "source_code": "def boxcar(M, sym = True, *, xp = None, device = None): xp = _namespace(xp) if _len_guards(M): return xp.ones(M, dtype = xp.float64, device = device) M, needs_trunc = _extend(M, sym) w = xp.ones(M, dtype = xp.float64, device = device) return _truncate(w, needs_trunc)", - "docstring": "Return a boxcar or rectangular window. Also known as a rectangular window or Dirichlet window, this is equivalent to no window at all. Parameters ---------- M : int Number of points in the output window. If zero, an empty array is returned. An exception is thrown when it is negative. sym : bool, optional Whether the window is symmetric. (Has no effect for boxcar.) %(xp_device_snippet)s Returns ------- w : ndarray The window, with the maximum value normalized to 1.
Examples -------- Plot the window and its frequency response: >>> import numpy as np >>> from scipy import signal >>> from scipy.fft import fft, fftshift >>> import matplotlib.pyplot as plt >>> window = signal.windows.boxcar(51) >>> plt.plot(window) >>> plt.title(\"Boxcar window\") >>> plt.ylabel(\"Amplitude\") >>> plt.xlabel(\"Sample\") >>> plt.figure() >>> A = fft(window, 2048) / (len(window)/2.0) >>> freq = np.linspace(-0.5, 0.5, len(A)) >>> response = 20 * np.log10(np.abs(fftshift(A / abs(A).max()))) >>> plt.plot(freq, response) >>> plt.axis([-0.5, 0.5, -120, 0]) >>> plt.title(\"Frequency response of the boxcar window\") >>> plt.ylabel(\"Normalized magnitude [dB]\") >>> plt.xlabel(\"Normalized frequency [cycles per sample]\")", - "type": "function", - "file_path": "scipy\\scipy\\signal\\windows\\_windows.py", - "ast_data": "FunctionDef name:boxcar arguments arg:M arg:sym Assign Call call:_namespace If Call call:_len_guards Return return:yes Assign Call call:_extend Assign Call call:ones Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_per_worker_dataset", - "source_code": "def get_per_worker_dataset(dataset_or_dataset_fn, coordinator): if callable(dataset_or_dataset_fn): return PerWorkerDatasetFromDatasetFunction(dataset_or_dataset_fn, coordinator) else: return PerWorkerDatasetFromDataset(dataset_or_dataset_fn, coordinator)", - "docstring": "Returns a per-worker dataset from a dataset or a dataset function.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", - "ast_data": "FunctionDef name:get_per_worker_dataset arguments arg:dataset_or_dataset_fn arg:coordinator If Call call:callable Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "compute_projection_matrix", - "source_code": "def compute_projection_matrix(self, pinhole_src: PinholeCamera) -> DepthWarper: if not isinstance(self._pinhole_dst, PinholeCamera): raise TypeError(f'Member self._pinhole_dst expected to be of class PinholeCamera. Got {type(self._pinhole_dst)}') if not isinstance(pinhole_src, PinholeCamera): raise TypeError(f'Argument pinhole_src expected to be of class PinholeCamera. Got {type(pinhole_src)}') dst_trans_src: Tensor = compose_transformations(self._pinhole_dst.extrinsics, inverse_transformation(pinhole_src.extrinsics)) dst_proj_src: Tensor = torch.matmul(self._pinhole_dst.intrinsics, dst_trans_src) self._pinhole_src = pinhole_src self._dst_proj_src = dst_proj_src return self", - "docstring": "Compute the projection matrix from the source to destination frame.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\depth.py", - "ast_data": "FunctionDef name:compute_projection_matrix arguments arg:self arg:pinhole_src type:PinholeCamera If Raise raises:TypeError(f'Member self._pinhole_dst expected to be of class PinholeCamera. Got {type(self._pinhole_dst)}') If Raise raises:TypeError(f'Argument pinhole_src expected to be of class PinholeCamera. Got {type(pinhole_src)}') Assign Assign Return return:yes" - }, - { - "library": "scipy", - "name": "update", - "source_code": "def update(self, delta_x, delta_grad): raise NotImplementedError('The method ``update(delta_x, delta_grad)`` is not implemented.')", - "docstring": "Update internal matrix. Update Hessian matrix or its inverse (depending on how 'approx_type' is defined) using information about the last evaluated points. 
Parameters ---------- delta_x : ndarray The difference between two points the gradient function have been evaluated at: ``.", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_hessian_update_strategy.py", - "ast_data": "FunctionDef name:update arguments arg:self arg:delta_x arg:delta_grad Raise raises:NotImplementedError('The method ``update(delta_x, delta_grad)`` is not implemented.')" - }, - { - "library": "seaborn", - "name": "default_range", - "source_code": "@property def default_range(self) -> tuple[float, float]: base = mpl.rcParams['lines.linewidth'] return (base * 0.5, base * 2)", - "docstring": "Min and max values used by default for semantic mapping.", - "type": "method", - "file_path": "seaborn\\seaborn\\_core\\properties.py", - "ast_data": "FunctionDef name:default_range arguments arg:self Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, X, y = None): self._fit_transform(X) return self", - "docstring": "Compute the embedding vectors for data X. Parameters ---------- X : array-like of shape (n_samples, n_features) Training set. y : Ignored Not used, present here for API consistency by convention. Returns ------- self : object Fitted class instance.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\manifold\\_locally_linear.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Call call:_fit_context Return return:yes" - }, - { - "library": "scikit-learn", - "name": "transform", - "source_code": "def transform(self, X): check_is_fitted(self) X = validate_data(self, X, reset = False) Ih = np.eye(len(self.components_)) X_transformed = X - self.mean_ Wpsi = self.components_ / self.noise_variance_ cov_z = linalg.inv(Ih + np.dot(Wpsi, self.components_.T)) tmp = np.dot(X_transformed, Wpsi.T) X_transformed = np.dot(tmp, cov_z) return X_transformed", - "docstring": "Apply dimensionality reduction to X using the model. Compute the expected mean of the latent variables. See Barber, 21.2.33 (or Bishop, 12.66). Parameters ---------- X : array-like of shape (n_samples, n_features) Training data. 
Returns ------- X_new : ndarray of shape (n_samples, n_components) The latent variables of X.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:X Assign Call call:validate_data Assign Call call:eye Assign Assign Assign Call call:inv Assign Call call:dot Assign Call call:dot Return return:yes" - }, - { - "library": "pytorch", - "name": "PassResult", - "source_code": "@compatibility(is_backward_compatible = False) class PassResult(namedtuple('PassResult', ['graph_module', 'modified'])): __slots__ = () def __new__(cls, graph_module, modified): return super().__new__(cls, graph_module, modified)", - "docstring": "Result of a pass: graph_module: The modified graph module modified: A flag for if the pass has modified the graph module", - "type": "class", - "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py", - "ast_data": "ClassDef name:PassResult Call call:namedtuple Call call:compatibility Assign FunctionDef name:__new__ arguments arg:cls arg:graph_module arg:modified Return return:yes" - }, - { - "library": "pytorch", - "name": "range", - "source_code": "@contextmanager def range(msg, *args, **kwargs): range_push(msg.format(*args, **kwargs)) try: yield finally: range_pop()", - "docstring": "Context manager / decorator that pushes an NVTX range at the beginning of its scope, and pops it at the end. If extra arguments are given, they are passed as arguments to msg.format(). Args: msg (str): message to associate with the range", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\nvtx.py", - "ast_data": "FunctionDef name:range arguments arg:msg vararg:args kwarg:kwargs Try" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "@_docstring.interpd def __init__(self, axes, spine_type, path, **kwargs): super().__init__(**kwargs) self.axes = axes self.set_figure(self.axes.get_figure(root = False)) self.spine_type = spine_type self.set_facecolor('none') self.set_edgecolor(mpl.rcParams['axes.edgecolor']) self.set_linewidth(mpl.rcParams['axes.linewidth']) self.set_capstyle('projecting') self.axis = None self.set_zorder(2.5) self.set_transform(self.axes.transData) self._bounds = None self._position = None _api.check_isinstance(mpath.Path, path = path) self._path = path self._patch_type = 'line' self._patch_transform = mtransforms.IdentityTransform()", - "docstring": "Parameters ---------- axes : The instance containing the spine. spine_type : str The spine type. path : The instance used to draw the spine. Other Parameters ---------------- **kwargs Valid keyword arguments are: %(Patch:kwdoc)s", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\spines.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:axes arg:spine_type arg:path kwarg:kwargs Assign Assign Assign Assign Assign Assign Assign Assign Call call:IdentityTransform" - }, - { - "library": "pandas", - "name": "idxmax", - "source_code": "def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: axis = self._get_axis_number(axis) iloc = self.argmax(axis, skipna, *args, **kwargs) return self.index[iloc]", - "docstring": "Return the row label of the maximum value. If multiple values equal the maximum, the first row label with that value is returned. Parameters ---------- axis : {0 or 'index'} Unused. Parameter needed for compatibility with DataFrame. skipna : bool, default True Exclude NA/null values. If the entire Series is NA, or if ``. 
Examples -------- >>> s = pd.Series(data=[1, None, 4, 3, 4], index=[\"A\", \"B\", \"C\", \"D\", \"E\"]) >>> s A 1.0 B NaN C 4.0 D 3.0 E 4.0 dtype: float64 >>> s.idxmax() 'C'", - "type": "method", - "file_path": "pandas\\pandas\\core\\series.py", - "ast_data": "FunctionDef name:idxmax arguments arg:self arg:axis type:Axis arg:skipna type:bool vararg:args kwarg:kwargs Assign Call call:_get_axis_number Assign Call call:argmax Return return:yes" - }, - { - "library": "django", - "name": "get_empty_value_display", - "source_code": "def get_empty_value_display(self): try: return mark_safe(self.empty_value_display) except AttributeError: return mark_safe(self.admin_site.empty_value_display)", - "docstring": "Return the empty_value_display set on ModelAdmin or AdminSite.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\options.py", - "ast_data": "FunctionDef name:get_empty_value_display arguments arg:self Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "tensorflow", - "name": "create_slot", - "source_code": "def create_slot(primary, val, name, colocate_with_primary = True, *, copy_xla_sharding = False): validate_shape = val.get_shape().is_fully_defined() if isinstance(primary, variables.Variable): prefix = primary._shared_name else: prefix = primary.op.name with variable_scope.variable_scope(None, prefix + '/' + name): if colocate_with_primary: distribution_strategy = distribute_lib.get_strategy() with distribution_strategy.extended.colocate_vars_with(primary): return _create_slot_var(primary, val, '', validate_shape, None, None, copy_xla_sharding = copy_xla_sharding) else: return _create_slot_var(primary, val, '', validate_shape, None, None, copy_xla_sharding = copy_xla_sharding)", - "docstring": "Create a slot initialized to the given value. The type of the slot is determined by the given value. Args: primary: The primary or . val: A specifying the initial value of the slot. name: Name to use for the slot variable. colocate_with_primary: Boolean. If True the slot is located on the same device as . copy_xla_sharding: Boolean. If True also copies XLA sharding from primary. Returns: A object.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\training\\slot_creator.py", - "ast_data": "FunctionDef name:create_slot arguments arg:primary arg:val arg:name arg:colocate_with_primary Assign Call call:is_fully_defined If Call call:isinstance Assign Assign With If Assign Call call:get_strategy With Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "__repr__", - "source_code": "def __repr__(self) -> str: if self._info_repr(): buf = StringIO() self.info(buf = buf) return buf.getvalue() repr_params = fmt.get_dataframe_repr_params() return self.to_string(**repr_params)", - "docstring": "Return a string representation for a particular DataFrame.", - "type": "method", - "file_path": "pandas\\pandas\\core\\frame.py", - "ast_data": "FunctionDef name:__repr__ arguments arg:self If Call call:_info_repr Assign Call call:StringIO Return return:yes Assign Call call:get_dataframe_repr_params Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_width", - "source_code": "def set_width(self, w): self._width = w self.stale = True", - "docstring": "Set the rectangle width. 
Parameters ---------- w : float", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_width arguments arg:self arg:w Assign Assign" - }, - { - "library": "scipy", - "name": "sqeuclidean", - "source_code": "def sqeuclidean(u, v, w = None): utype, vtype = (None, None) if not (hasattr(u, 'dtype') and np.issubdtype(u.dtype, np.inexact)): utype = np.float64 if not (hasattr(v, 'dtype') and np.issubdtype(v.dtype, np.inexact)): vtype = np.float64 u = _validate_vector(u, dtype = utype) v = _validate_vector(v, dtype = vtype) u_v = u - v u_v_w = u_v if w is not None: w = _validate_weights(w) u_v_w = w * u_v return np.dot(u_v, u_v_w)", - "docstring": "Compute the squared Euclidean distance between two 1-D arrays. The squared Euclidean distance between and is defined as .. math:: \\sum_i{w_i |u_i - v_i|^2} Parameters ---------- u : (N,) array_like Input array. v : (N,) array_like Input array. w : (N,) array_like, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 Returns ------- sqeuclidean : double The squared Euclidean distance between vectors and . Examples -------- >>> from scipy.spatial import distance >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0]) 2.0 >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0]) 1.0", - "type": "function", - "file_path": "scipy\\scipy\\spatial\\distance.py", - "ast_data": "FunctionDef name:sqeuclidean arguments arg:u arg:v arg:w Assign If Assign If Assign Assign Call call:_validate_vector Assign Call call:_validate_vector Assign Assign If Compare op:IsNot Assign Call call:_validate_weights Assign Return return:yes" - }, - { - "library": "sphinx", - "name": "build_update", - "source_code": "@final def build_update(self) -> None: self.compile_update_catalogs() to_build = self.get_outdated_docs() if isinstance(to_build, str): self.build(['__all__'], summary = to_build, method = 'update') else: to_build = set(to_build) self.build(to_build, summary = __('targets for %d source files that are out of date') % len(to_build), method = 'update')", - "docstring": "Only rebuild what was changed or added since last build.", - "type": "method", - "file_path": "sphinx\\sphinx\\builders\\__init__.py", - "ast_data": "FunctionDef name:build_update arguments arg:self Assign Call call:get_outdated_docs If Call call:isinstance Assign Call call:set" - }, - { - "library": "tensorflow", - "name": "fn_args", - "source_code": "def fn_args(fn): if isinstance(fn, functools.partial): args = fn_args(fn.func) args = [a for a in args[len(fn.args):] if a not in (fn.keywords or [])] else: if hasattr(fn, '__call__') and tf_inspect.ismethod(fn.__call__): fn = fn.__call__ args = tf_inspect.getfullargspec(fn).args if _is_bound_method(fn) and args: args.pop(0) return tuple(args)", - "docstring": "Get argument names for function-like object. Args: fn: Function, or function-like object (e.g., result of ). Returns: of string argument names. 
Raises: ValueError: if partial function has positionally bound arguments", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\variable_scope_shim.py", - "ast_data": "FunctionDef name:fn_args arguments arg:fn If Call call:isinstance Assign Call call:fn_args Assign If BoolOp Call call:hasattr Call call:ismethod Assign Assign If BoolOp Call call:_is_bound_method Return return:yes" - }, - { - "library": "pytorch", - "name": "stride_ordered_for_memory_format", - "source_code": "@staticmethod def stride_ordered_for_memory_format(sizes, memory_format): if memory_format = = torch.channels_last: return FlexibleLayout.stride_ordered(sizes, NHWC_STRIDE_ORDER) elif memory_format = = torch.channels_last_3d: return FlexibleLayout.stride_ordered(sizes, NHWDC_STRIDE_ORDER) elif memory_format = = torch.contiguous_format: return FlexibleLayout.contiguous_strides(sizes) else: log.debug('stride_ordered_for_memory_format, unsuppored memory_format: %s', memory_format) raise NotImplementedError", - "docstring": "Create a stride based on a memory format. Memory format is translasted into a stride order, so channels_last is the same as: FlexibleLayout.stride_ordered(sizes, [3, 0, 2, 1]) This interface does not support memory_format which should be used to deduce a format from another source", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "FunctionDef name:stride_ordered_for_memory_format arguments arg:sizes arg:memory_format If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Raise raises:NotImplementedError" - }, - { - "library": "matplotlib", - "name": "get_angles", - "source_code": "def get_angles(self): return np.rad2deg(self._angles)", - "docstring": "Get the angles of the first axes, degrees CCW from the x-axis.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "FunctionDef name:get_angles arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "set_dir", - "source_code": "def set_dir(d: Union[str, os.PathLike]) -> None: global _hub_dir _hub_dir = os.path.expanduser(d)", - "docstring": "Optionally set the Torch Hub directory used to save downloaded models & weights. Args: d (str): path to a local folder to save downloaded models & weights.", - "type": "function", - "file_path": "pytorch\\torch\\hub.py", - "ast_data": "FunctionDef name:set_dir arguments arg:d type:Union[str, os.PathLike] Assign Call call:expanduser" - }, - { - "library": "algorithms", - "name": "two_sum1", - "source_code": "def two_sum1(numbers, target): dic = {} for i, num in enumerate(numbers): if target - num in dic: return [dic[target - num] + 1, i + 1] dic[num] = i return None", - "docstring": "Given a list of numbers, find the indices of two numbers such that their sum is the given target. 
Using a hash table.", - "type": "function", - "file_path": "algorithms\\algorithms\\search\\two_sum.py", - "ast_data": "FunctionDef name:two_sum1 arguments arg:numbers arg:target Assign For Call call:enumerate If Compare op:In Return return:yes Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "copy_fwd_metadata_to_bw_nodes", - "source_code": "def copy_fwd_metadata_to_bw_nodes(fx_g): def _is_forward_node_with_seq_nr(node): return 'nn_module_stack' in node.meta and 'seq_nr' in node.meta def _is_backward_node_with_seq_nr(node): return 'nn_module_stack' not in node.meta and 'seq_nr' in node.meta fwd_seq_nr_to_node = {} for node in fx_g.graph.nodes: if not _is_forward_node_with_seq_nr(node): continue seq_nr = node.meta['seq_nr'] if seq_nr in fwd_seq_nr_to_node: continue fwd_seq_nr_to_node[node.meta['seq_nr']] = node for node in fx_g.graph.nodes: if not _is_backward_node_with_seq_nr(node): continue fwd_node = fwd_seq_nr_to_node.get(node.meta['seq_nr']) if fwd_node is not None: node.meta['fwd_nn_module_stack'] = fwd_node.meta['nn_module_stack'] node.meta['fwd_source_fn_stack'] = fwd_node.meta.get('source_fn_stack')", - "docstring": "Input: which contains the joint fwd+bwd FX graph created by aot_autograd. This function walks the graph and copies over metadata from forward nodes to backward nodes, using the field as a one-to-many mapping from forward node to backward node. This metadata is useful for performance profiling and debugging.", - "type": "function", - "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\utils.py", - "ast_data": "FunctionDef name:copy_fwd_metadata_to_bw_nodes arguments arg:fx_g FunctionDef name:_is_forward_node_with_seq_nr arguments arg:node Return return:yes FunctionDef name:_is_backward_node_with_seq_nr arguments arg:node Return return:yes Assign For If Assign If Compare op:In Assign For If Assign Call call:get If Compare op:IsNot Assign Assign Call call:get" - }, - { - "library": "pandas", - "name": "ParserWarning", - "source_code": "class ParserWarning(Warning): pass", - "docstring": "Warning raised when reading a file that doesn't use the default 'c' parser. Raised by and when it is necessary to change parsers, generally from the default 'c' parser to 'python'. It happens due to a lack of support or functionality for parsing a particular attribute of a CSV file with the requested engine. Currently, 'c' unsupported options include the following parameters: 1. other than a single character (e.g. regex separators) 2. higher than 0 The warning can be avoided by adding as a parameter in and methods. See Also -------- pd.read_csv : Read CSV (comma-separated) file into DataFrame. pd.read_table : Read general delimited file into DataFrame. Examples -------- Using a in other than a single character: >>> import io >>> csv = '''a;b;c ... 1;1,8 ... 1;2,1''' >>> df = pd.read_csv(io.StringIO(csv), sep=\"[;,]\") # doctest: +SKIP ... # ParserWarning: Falling back to the 'python' engine... Adding to removes the Warning: >>> df = pd.read_csv(io.StringIO(csv), sep=\"[;,]\", engine=\"python\")", - "type": "class", - "file_path": "pandas\\pandas\\errors\\__init__.py", - "ast_data": "ClassDef name:ParserWarning" - }, - { - "library": "scipy", - "name": "step", - "source_code": "def step(self, x0 = None, t = None, n = None): return dstep(self, x0 = x0, t = t, n = n)", - "docstring": "Return the step response of the discrete-time system. 
See for details.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:step arguments arg:self arg:x0 arg:t arg:n Return return:yes" - }, - { - "library": "pytorch", - "name": "clear_weight_quant_obs_node", - "source_code": "def clear_weight_quant_obs_node(op_node: Node, modules: dict[str, nn.Module]) -> None: weight_eq_obs_node = maybe_get_weight_eq_obs_node(op_node, modules) if weight_eq_obs_node is None: return weight_quant_obs_node = weight_eq_obs_node.args[0] if weight_quant_obs_node is None: return assert isinstance(weight_quant_obs_node, Node) weight_quant_obs = modules[str(weight_quant_obs_node.target)] assert isinstance(modules[str(weight_quant_obs_node.target)], ObserverBase) weight_quant_obs.reset_min_max_vals()", - "docstring": "Given the operation node, we want find the corresponding quantization observer and reset its min/max values", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", - "ast_data": "FunctionDef name:clear_weight_quant_obs_node arguments arg:op_node type:Node arg:modules type:dict[str, nn.Module] Assign Call call:maybe_get_weight_eq_obs_node If Compare op:Is Return return:no Assign If Compare op:Is Return return:no Assign" - }, - { - "library": "pytorch", - "name": "caching_allocator_enable", - "source_code": "def caching_allocator_enable(value: bool = True) -> None: if is_initialized(): torch._C._cuda_cudaCachingAllocator_enable(value)", - "docstring": "Enable or disable the CUDA memory allocator. On by default.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\memory.py", - "ast_data": "FunctionDef name:caching_allocator_enable arguments arg:value type:bool If Call call:is_initialized" - }, - { - "library": "tensorflow", - "name": "maybe_do_strip", - "source_code": "def maybe_do_strip(node: node_def_pb2.NodeDef) -> None: if node.op = = 'Assert' or node.op = = 'PrintV2': node.op = 'NoOp' erase_regular_node_attributes(node) new_inputs = [] for inp in node.input: if not is_control_input(inp): new_inputs.append(as_control_dep(inp)) else: new_inputs.append(inp) node.ClearField('input') node.input.extend(new_inputs) elif node.op = = 'CheckNumerics' or node.op = = 'Print': node.op = 'Identity' prune_all_non_t_attributes(node) for i in range(1, len(node.input)): if not is_control_input(node.input[i]): node.input[i] = as_control_dep(node.input[i])", - "docstring": "Strips the graph from Assert and CheckNumerics ops. For Assert ops, this function also rewrites all of the inputs to the nodes that were transformed by making them into control dependencies. It also removes all of the regular node attributes, that is all node attributes that do not start with . For CheckNumerics ops, this function turns the op into an Identity op, which will be pruned later (according to the original implementation in grappler's . Then, since Identity ops only take one input, it leaves the first input as is while transforming the other ones into control dependencies. 
Args: node: The node to potentally strip.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", - "ast_data": "FunctionDef name:maybe_do_strip arguments arg:node type:node_def_pb2.NodeDef If BoolOp Compare op:Eq Compare op:Eq Assign Assign For If If BoolOp Compare op:Eq Compare op:Eq Assign For Call call:range If Assign Call call:as_control_dep" - }, - { - "library": "tensorflow", - "name": "shape_type_conversion", - "source_code": "def shape_type_conversion(fn): def wrapper(instance, input_shape): if input_shape is not None: input_shape = convert_shapes(input_shape, to_tuples = True) output_shape = fn(instance, input_shape) if output_shape is not None: output_shape = convert_shapes(output_shape, to_tuples = False) return output_shape return wrapper", - "docstring": "Decorator that handles tuple/TensorShape conversion. Used in and . Args: fn: function to wrap. Returns: Wrapped function.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", - "ast_data": "FunctionDef name:shape_type_conversion arguments arg:fn FunctionDef name:wrapper arguments arg:instance arg:input_shape If Compare op:IsNot Assign Call call:convert_shapes Assign Call call:fn If Compare op:IsNot Assign Call call:convert_shapes Return return:yes Return return:yes" - }, - { - "library": "sphinx", - "name": "literal_strong", - "source_code": "class literal_strong(nodes.strong, not_smartquotable): pass", - "docstring": "Node that behaves like , but further text processors are not applied (e.g. smartypants for HTML output).", - "type": "class", - "file_path": "sphinx\\sphinx\\addnodes.py", - "ast_data": "ClassDef name:literal_strong" - }, - { - "library": "tensorflow", - "name": "transform_ast", - "source_code": "def transform_ast(self, node, ctx): raise NotImplementedError('subclasses must override this')", - "docstring": "Performs an actual transformation of a function's AST. Subclasses must implement this method, and do not usually call it. Args: node: One or more ast.AST nodes representing the AST to be transformed. 
ctx: transformer.Context.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py", - "ast_data": "FunctionDef name:transform_ast arguments arg:self arg:node arg:ctx Raise raises:NotImplementedError('subclasses must override this')" - }, - { - "library": "tensorflow", - "name": "is_split_variable", - "source_code": "def is_split_variable(v): return hasattr(v, '_variable_list') or hasattr(v, '_variables')", - "docstring": "Returns True if is either a PartitionedVariable or a ShardedVariable.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py", - "ast_data": "FunctionDef name:is_split_variable arguments arg:v Return return:yes" - }, - { - "library": "pandas", - "name": "format_table_styles", - "source_code": "def format_table_styles(styles: CSSStyles) -> CSSStyles: return [{'selector': selector, 'props': css_dict['props']} for css_dict in styles for selector in css_dict['selector'].split(', ')]", - "docstring": "looks for multiple CSS selectors and separates them: [{'selector': 'td, th', 'props': 'a:v;'}] ---> [{'selector': 'td', 'props': 'a:v;'}, {'selector': 'th', 'props': 'a:v;'}]", - "type": "function", - "file_path": "pandas\\pandas\\io\\formats\\style_render.py", - "ast_data": "FunctionDef name:format_table_styles arguments arg:styles type:CSSStyles Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_number_of_shards", - "source_code": "def set_number_of_shards(self, number_of_shards): if self._frozen: if self._number_of_shards ! = number_of_shards: raise ValueError(f\"Can't set sharding policy to use {number_of_shards} shards since it has been frozen to use {self._number_of_shards}\") elif number_of_shards > 0: self._number_of_shards = number_of_shards else: raise ValueError(f\"Can't set sharding policy to use {number_of_shards} shards; value must be > 0\")", - "docstring": "Sets the number of shards for the current policy. If the policy has been frozen then number_of_shards must match the existing setting. Args: number_of_shards: The number of shards to use in the policy. Raises: ValueError: If the policy has been frozen and number_of_shards differs from the frozen value; or number_of_shards <= 0.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py", - "ast_data": "FunctionDef name:set_number_of_shards arguments arg:self arg:number_of_shards If If Compare op:NotEq Raise raises:ValueError(f\"Can't set sharding policy to use {number_of_shards} shards since it has been frozen to use {self._number_of_shards}\") If Compare op:Gt Assign Raise raises:ValueError(f\"Can't set sharding policy to use {number_of_shards} shards; value must be > 0\")" - }, - { - "library": "django", - "name": "j", - "source_code": "def j(self): return self.data.day", - "docstring": "Day of the month without leading zeros; i.e. 
'1' to '31'", - "type": "method", - "file_path": "django\\django\\utils\\dateformat.py", - "ast_data": "FunctionDef name:j arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "can_ccompiler_link", - "source_code": "def can_ccompiler_link(self, ccompiler): return True", - "docstring": "Check if the given C compiler can link objects produced by this compiler.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\fcompiler\\__init__.py", - "ast_data": "FunctionDef name:can_ccompiler_link arguments arg:self arg:ccompiler Return return:yes" - }, - { - "library": "django", - "name": "generate_filename", - "source_code": "def generate_filename(self, instance, filename): if callable(self.upload_to): filename = self.upload_to(instance, filename) else: dirname = datetime.datetime.now().strftime(str(self.upload_to)) filename = posixpath.join(dirname, filename) filename = validate_file_name(filename, allow_relative_path = True) return self.storage.generate_filename(filename)", - "docstring": "Apply (if callable) or prepend (if a string) upload_to to the filename, then delegate further processing of the name to the storage backend. Until the storage layer, all file paths are expected to be Unix style (with forward slashes).", - "type": "method", - "file_path": "django\\django\\db\\models\\fields\\files.py", - "ast_data": "FunctionDef name:generate_filename arguments arg:self arg:instance arg:filename If Call call:callable Assign Call call:upload_to Assign Call call:strftime Assign Call call:join Assign Call call:validate_file_name Return return:yes" - }, - { - "library": "scipy", - "name": "__call__", - "source_code": "def __call__(self, t, *args, **kwargs): raise NotImplementedError", - "docstring": "Apply the transformation to `` and multiply by the Jacobian determinant. This should be the new integrand after the transformation has been applied so that the following is satisfied:: f_transformed = _VariableTransform(f) cubature(f, a, b) == cubature( f_transformed, *f_transformed.transformed_limits(a, b), )", - "type": "method", - "file_path": "scipy\\scipy\\integrate\\_cubature.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:t vararg:args kwarg:kwargs Raise raises:NotImplementedError" - }, - { - "library": "scikit-learn", - "name": "transform", - "source_code": "def transform(self, X): check_is_fitted(self, 'categories_') X_int, X_mask = self._transform(X, handle_unknown = self.handle_unknown, ensure_all_finite = 'allow-nan', ignore_category_indices = self._missing_indices) X_trans = X_int.astype(self.dtype, copy = False) for cat_idx, missing_idx in self._missing_indices.items(): X_missing_mask = X_int[:, cat_idx] = = missing_idx X_trans[X_missing_mask, cat_idx] = self.encoded_missing_value if self.handle_unknown = = 'use_encoded_value': X_trans[~X_mask] = self.unknown_value return X_trans", - "docstring": "Transform X to ordinal codes. Parameters ---------- X : array-like of shape (n_samples, n_features) The data to encode. 
Returns ------- X_out : ndarray of shape (n_samples, n_features) Transformed input.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\preprocessing\\_encoders.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:X Assign Call call:_transform Assign Call call:astype For Call call:items Assign Compare op:Eq Assign If Compare op:Eq Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_function_def", - "source_code": "def get_function_def(fname, graph): if context.executing_eagerly(): if context.context().has_function(fname): return context.context().get_function_def(fname) else: while graph is not None: if graph._is_function(fname): return graph._get_function(fname).cached_definition graph = getattr(graph, 'outer_graph', None)", - "docstring": "Gets a function definition with in the current context.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\function_def_to_graph.py", - "ast_data": "FunctionDef name:get_function_def arguments arg:fname arg:graph If Call call:executing_eagerly If Call call:has_function Return return:yes While Compare op:IsNot If Call call:_is_function Return return:yes Assign Call call:getattr" - }, - { - "library": "algorithms", - "name": "__floordiv__", - "source_code": "def __floordiv__(self, other: Union[int, float, Fraction, Monomial]): return self.__truediv__(other)", - "docstring": "For Polynomials, floordiv is the same as truediv.", - "type": "method", - "file_path": "algorithms\\algorithms\\maths\\polynomial.py", - "ast_data": "FunctionDef name:__floordiv__ arguments arg:self arg:other type:Union[int, float, Fraction, Monomial] Return return:yes" - }, - { - "library": "kornia", - "name": "random", - "source_code": "@classmethod def random(cls, batch_size: Optional[int] = None, device: Optional[Device] = None, dtype: Dtype = None) -> Se3: shape: tuple[int, ...] if batch_size is None: shape = () else: KORNIA_CHECK(batch_size > = 1, msg = 'batch_size must be positive') shape = (batch_size,) r = So3.random(batch_size, device, dtype) t = Vector3.random(shape, device, dtype) return cls(r, t)", - "docstring": "Create a Se3 group representing a random transformation. Args: batch_size: the batch size of the underlying data. device: device to place the result on. dtype: dtype of the result. 
Example: >>> s = Se3.random() >>> s = Se3.random(batch_size=3)", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", - "ast_data": "FunctionDef name:random arguments arg:cls arg:batch_size type:Optional[int] arg:device type:Optional[Device] arg:dtype type:Dtype If Compare op:Is Assign Assign Assign Call call:random Assign Call call:random Return return:yes" - }, - { - "library": "django", - "name": "split_url", - "source_code": "def split_url(url): try: return list(urlsplit(url)) except ValueError: raise ValidationError(self.error_messages['invalid'], code = 'invalid')", - "docstring": "Return a list of url parts via urlsplit(), or raise ValidationError for some malformed URLs.", - "type": "method", - "file_path": "django\\django\\forms\\fields.py", - "ast_data": "FunctionDef name:split_url arguments arg:url Try Return return:yes ExceptHandler Raise raises:ValidationError(self.error_messages['invalid'], code='invalid')" - }, - { - "library": "pytorch", - "name": "NoopObserver", - "source_code": "class NoopObserver(ObserverBase): def __init__(self, dtype = torch.float16, custom_op_name = '') -> None: super().__init__(dtype = dtype, is_dynamic = False) self.dtype = dtype self.custom_op = custom_op_name def forward(self, x): return x @torch.jit.export def calculate_qparams(self): raise Exception('calculate_qparams should not be called for NoopObserver')", - "docstring": "Observer that doesn't do anything and just passes its configuration to the quantized module's ``. Primarily used for quantization to float16 which doesn't require determining ranges. Args: dtype: Quantized data type custom_op_name: (temporary) specify this observer for an operator that doesn't require any observation (Can be used in Graph Mode Passes for special case ops).", - "type": "class", - "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", - "ast_data": "ClassDef name:NoopObserver FunctionDef name:__init__ arguments arg:self arg:dtype arg:custom_op_name Assign Assign FunctionDef name:forward arguments arg:self arg:x Return return:yes FunctionDef name:calculate_qparams arguments arg:self Raise raises:Exception('calculate_qparams should not be called for NoopObserver')" - }, - { - "library": "pytorch", - "name": "nodes", - "source_code": "@property def nodes(self) -> _node_list: return _node_list(self)", - "docstring": "Get the list of Nodes that constitute this Graph. Note that this `` can be called on this list to switch iteration order.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\graph.py", - "ast_data": "FunctionDef name:nodes arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "edges", - "source_code": "@property def edges(self): return self._edges", - "docstring": "The default value of for newly added cells using . Notes ----- This setting does currently only affect newly created cells using . 
To change existing cells, you have to set their edges explicitly:: for c in tab.get_celld().values(): c.visible_edges = 'horizontal'", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\table.py", - "ast_data": "FunctionDef name:edges arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "ConvBnReLU1d", - "source_code": "class ConvBnReLU1d(ConvBn1d): _FLOAT_MODULE: ClassVar[type[nn.Module]] = nni.ConvBnReLU1d _FLOAT_CONV_MODULE: ClassVar[type[nn.Conv1d]] = nn.Conv1d _FLOAT_BN_MODULE: ClassVar[type[nn.BatchNorm1d]] = nn.BatchNorm1d _FLOAT_RELU_MODULE: ClassVar[Optional[type[nn.Module]]] = nn.ReLU _FUSED_FLOAT_MODULE: ClassVar[Optional[type[nn.Module]]] = nni.ConvReLU1d def __init__(self, in_channels, out_channels, kernel_size, stride = 1, padding = 0, dilation = 1, groups = 1, bias = None, padding_mode = 'zeros', eps = 1e-05, momentum = 0.1, freeze_bn = False, qconfig = None): super().__init__(in_channels, out_channels, kernel_size, stride, padding, dilation, groups, bias, padding_mode, eps, momentum, freeze_bn, qconfig) def forward(self, input): return F.relu(self._forward(input)) @classmethod def from_float(cls, mod, use_precomputed_fake_quant = False): return super().from_float(mod, use_precomputed_fake_quant)", - "docstring": "A ConvBnReLU1d module is a module fused from Conv1d, BatchNorm1d and ReLU, attached with FakeQuantize modules for weight, used in quantization aware training. We combined the interface of :class: and :class: and :class:. Similar to , with FakeQuantize modules initialized to default. Attributes: weight_fake_quant: fake quant module for weight", - "type": "class", - "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py", - "ast_data": "ClassDef name:ConvBnReLU1d FunctionDef name:__init__ arguments arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:dilation arg:groups arg:bias arg:padding_mode arg:eps arg:momentum arg:freeze_bn arg:qconfig FunctionDef name:forward arguments arg:self arg:input Return return:yes FunctionDef name:from_float arguments arg:cls arg:mod arg:use_precomputed_fake_quant Return return:yes" - }, - { - "library": "pytorch", - "name": "build_subgraph_buffer", - "source_code": "def build_subgraph_buffer(args: list[TensorBox], subgraph: Subgraph): cnt = 0 env = {} for node in subgraph.graph_module.graph.nodes: if node.op = = 'placeholder': env[node] = args[cnt] cnt + = 1 elif node.op = = 'call_function': args, kwargs = tree_map(lambda x: env[x] if x in env else x, (node.args, node.kwargs)) env[node] = lowerings[node.target](*args, **kwargs) elif node.op = = 'output': def convert_output_node_to_buffer(output): if output is None: return None output_node = output output_buffer = env[output_node] assert isinstance(output_buffer, TensorBox), (\"The output node for B2B-GEMM's subgraph must be a TensorBox, but got: \", type(output_buffer)) assert isinstance(output_buffer.data, StorageBox), (\"The output node for B2B-GEMM's subgraph must be a StorageBox, but got: \", type(output_buffer)) subgraph_buffer = ComputedBuffer(name = None, layout = FlexibleLayout(device = output_buffer.data.get_device(), dtype = output_buffer.data.get_dtype(), size = output_buffer.data.get_size()), data = output_buffer.data.data) return subgraph_buffer return tree_map(convert_output_node_to_buffer, node.args[0]) raise ValueError('B2B-GEMM was passed a subgraph with no output node!')", - "docstring": "This function is adapted from ../kernel/flex_attention.py. 
The goal is to take in the required args and produce the subgraph buffer The subgraph buffer is a ComputedBuffer that will be inlined into the triton template Args: args: The args that are passed into the subgraph subgraph: The Subgraph ir for which to produce the output node", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\fx_passes\\b2b_gemm.py", - "ast_data": "FunctionDef name:build_subgraph_buffer arguments arg:args type:list[TensorBox] arg:subgraph type:Subgraph Assign Assign For If Compare op:Eq Assign If Compare op:Eq Assign Call call:tree_map Assign Call If Compare op:Eq FunctionDef name:convert_output_node_to_buffer arguments arg:output If Compare op:Is Return return:yes Assign Assign Assign Call call:ComputedBuffer Return return:yes Return return:yes Raise raises:ValueError('B2B-GEMM was passed a subgraph with no output node!')" - }, - { - "library": "django", - "name": "CreateError", - "source_code": "class CreateError(Exception): pass", - "docstring": "Used internally as a consistent exception type to catch from save (see the docstring for SessionBase.save() for details).", - "type": "class", - "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", - "ast_data": "ClassDef name:CreateError" - }, - { - "library": "tensorflow", - "name": "broadcast_sample_weight_modes", - "source_code": "def broadcast_sample_weight_modes(target_structure, sample_weight_modes): if target_structure is None or not nest.flatten(target_structure): return sample_weight_modes if isinstance(sample_weight_modes, str): if isinstance(target_structure, dict): return {key: sample_weight_modes for key in target_structure.keys()} return [sample_weight_modes for _ in target_structure] if sample_weight_modes: try: nest.assert_same_structure(training_utils.list_to_tuple(target_structure), training_utils.list_to_tuple(sample_weight_modes)) except (ValueError, TypeError): target_str = str(nest.map_structure(lambda _: '...', target_structure)) mode_str = str(nest.map_structure(lambda _: '...', sample_weight_modes)) try: sample_weight_modes = nest.pack_sequence_as(target_structure, nest.flatten(sample_weight_modes)) logging.warning('sample_weight modes were coerced from\\n {}\\n to \\n {}'.format(target_str, mode_str)) except (ValueError, TypeError): raise ValueError('Unable to match target structure and sample_weight_modes structure: \\n {}\\n to \\n {}'.format(target_str, mode_str)) return sample_weight_modes", - "docstring": "Match sample_weight_modes structure with output structure.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", - "ast_data": "FunctionDef name:broadcast_sample_weight_modes arguments arg:target_structure arg:sample_weight_modes If BoolOp Compare op:Is Return return:yes If Call call:isinstance If Call call:isinstance Return return:yes Return return:yes If Try ExceptHandler Assign Call call:str Assign Call call:str Try Assign Call call:pack_sequence_as ExceptHandler Raise raises:ValueError('Unable to match target structure and sample_weight_modes structure:\\n {}\\n to \\n {}'.format(target_str, mode_str)) Return return:yes" - }, - { - "library": "flexx", - "name": "highlight_show", - "source_code": "def highlight_show(self, step = 0): classname = 'highlighted-true' all_items = self._get_all_items_annotated() self._highlight_on = True index1 = self._de_highlight_and_get_highlighted_index(all_items) index2 = 0 if index1 is None else index1 + step while 0 < = index2 < len(all_items): visible, _ = all_items[index2] 
if visible: break index2 + = step else: index2 = index1 if index2 is not None: _, item = all_items[index2] item._row.classList.add(classname) self._last_highlighted_hint = item.id y1 = item._row.offsetTop - 20 y2 = item._row.offsetTop + item._row.offsetHeight + 20 if self.node.scrollTop > y1: self.node.scrollTop = y1 if self.node.scrollTop + self.node.offsetHeight < y2: self.node.scrollTop = y2 - self.node.offsetHeight", - "docstring": "Highlight the \"current\" item, optionally moving step items.", - "type": "method", - "file_path": "flexx\\flexx\\ui\\widgets\\_tree.py", - "ast_data": "FunctionDef name:highlight_show arguments arg:self arg:step Assign Assign Call call:_get_all_items_annotated Assign Assign Call call:_de_highlight_and_get_highlighted_index Assign While Compare op:LtE op:Lt Assign If Assign If Compare op:IsNot Assign Assign Assign Assign If Compare op:Gt Assign If Compare op:Lt Assign" - }, - { - "library": "django", - "name": "compress", - "source_code": "def compress(self, data_list): raise NotImplementedError('Subclasses must implement this method.')", - "docstring": "Return a single value for the given list of values. The values can be assumed to be valid. For example, if this MultiValueField was instantiated with fields=(DateField(), TimeField()), this might return a datetime object created by combining the date and time in data_list.", - "type": "method", - "file_path": "django\\django\\forms\\fields.py", - "ast_data": "FunctionDef name:compress arguments arg:self arg:data_list Raise raises:NotImplementedError('Subclasses must implement this method.')" - }, - { - "library": "pandas", - "name": "axes", - "source_code": "@property def axes(self) -> list[Index]: return [self.index, self.columns]", - "docstring": "Return a list representing the axes of the DataFrame. It has the row axis labels and column axis labels as the only members. They are returned in that order. See Also -------- DataFrame.index: The index (row labels) of the DataFrame. DataFrame.columns: The column labels of the DataFrame. Examples -------- >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df.axes [RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'], dtype='object')]", - "type": "method", - "file_path": "pandas\\pandas\\core\\frame.py", - "ast_data": "FunctionDef name:axes arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "from_any", - "source_code": "def from_any(size, fraction_ref = None): if isinstance(size, Real): return Fixed(size) elif isinstance(size, str): if size[-1] = = '%': return Fraction(float(size[: -1]) / 100, fraction_ref) raise ValueError('Unknown format')", - "docstring": "Create a Fixed unit when the first argument is a float, or a Fraction unit if that is a string that ends with %. The second argument is only meaningful when Fraction unit is created. 
>>> from mpl_toolkits.axes_grid1.axes_size import from_any >>> a = from_any(1.2) # => Fixed(1.2) >>> from_any(\"50%\", a) # => Fraction(0.5, a)", - "type": "function", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_size.py", - "ast_data": "FunctionDef name:from_any arguments arg:size arg:fraction_ref If Call call:isinstance Return return:yes If Call call:isinstance If Compare op:Eq Return return:yes Raise raises:ValueError('Unknown format')" - }, - { - "library": "pytorch", - "name": "patch_forward", - "source_code": "@contextmanager def patch_forward(obj: torch.nn.Module, new_method): original_method = obj.forward obj.forward = new_method.__get__(obj, obj.__class__) try: yield finally: obj.forward = original_method", - "docstring": "Helper method to make it easier to cleanly torch.export() a method on a module that is not .", - "type": "function", - "file_path": "pytorch\\torch\\export\\_trace.py", - "ast_data": "FunctionDef name:patch_forward arguments arg:obj type:torch.nn.Module arg:new_method Assign Assign Call call:__get__ Try Assign" - }, - { - "library": "feincms", - "name": "pre_save_handler", - "source_code": "def pre_save_handler(sender, instance, **kwargs): taglist = parse_tag_input(instance.tags) instance.tags = taglist_to_string(taglist)", - "docstring": "Intercept attempts to save and sort the tag field alphabetically, so we won't have different permutations in the filter list.", - "type": "function", - "file_path": "feincms\\feincms\\contrib\\tagging.py", - "ast_data": "FunctionDef name:pre_save_handler arguments arg:sender arg:instance kwarg:kwargs Assign Call call:parse_tag_input Assign Call call:taglist_to_string" - }, - { - "library": "pytorch", - "name": "assert_static", - "source_code": "def assert_static(self, val): assert not val.is_dynamic(), 'expected static but got dynamic (run with TORCH_LOGS = dynamic for more info)'", - "docstring": "Asserts that the int is static (and not dynamic, per dynamic shapes)", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\comptime.py", - "ast_data": "FunctionDef name:assert_static arguments arg:self arg:val" - }, - { - "library": "pytorch", - "name": "get_score_mod", - "source_code": "def get_score_mod(self, score_mod: Optional[_score_mod_signature]) -> _score_mod_signature: if score_mod is None: score_mod = _identity def new_score_mod(score: torch.Tensor, b: torch.Tensor, h: torch.Tensor, q_idx: torch.Tensor, physical_kv_idx: torch.Tensor): physical_kv_block = physical_kv_idx // self.page_size physical_kv_offset = physical_kv_idx % self.page_size logical_block_idx = self.physical_to_logical[b, physical_kv_block] logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset return torch.where(logical_block_idx > = 0, score_mod(score, b, h, q_idx, logical_kv_idx), float('-inf')) return new_score_mod", - "docstring": "Converts a score_mod based on mapping from the physical block index to the logical block index. 
Args: score_mod (_score_mod_signature): score_mod based on the logical block index.", - "type": "method", - "file_path": "pytorch\\torch\\nn\\attention\\experimental\\_paged_attention.py", - "ast_data": "FunctionDef name:get_score_mod arguments arg:self arg:score_mod type:Optional[_score_mod_signature] If Compare op:Is Assign FunctionDef name:new_score_mod arguments arg:score type:torch.Tensor arg:b type:torch.Tensor arg:h type:torch.Tensor arg:q_idx type:torch.Tensor arg:physical_kv_idx type:torch.Tensor Assign Assign Assign Assign Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "Schwefel06", - "source_code": "class Schwefel06(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.custom_bounds = ([-10.0, 10.0], [-10.0, 10.0]) self.global_optimum = [[1.0, 3.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return max(abs(x[0] + 2 * x[1] - 7), abs(2 * x[0] + x[1] - 5))", - "docstring": "Schwefel 6 objective function. This class defines the Schwefel 6 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel06}}(x) = \\max(\\lvert x_1 + 2x_2 - 7 \\rvert, \\lvert 2x_1 + x_2 - 5 \\rvert) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", - "ast_data": "ClassDef name:Schwefel06 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "def __call__(self, f: _T) -> _T: _stats_registry.register(f, self._op_type + ', ' + self._statistic_type) return f", - "docstring": "Registers \"f\" as the statistics function for \"op_type\".", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:f type:_T Return return:yes" - }, - { - "library": "tensorflow", - "name": "fill_empty_rows", - "source_code": "def fill_empty_rows(ragged_input, default_value, name = None): with ops.name_scope(name, 'RaggedFillEmptyRows', [ragged_input]): if not isinstance(ragged_input, ragged_tensor.RaggedTensor): raise TypeError(f'ragged_input must be RaggedTensor, got {type(ragged_input)}') default_value = ops.convert_to_tensor(default_value, dtype = ragged_input.dtype) output_value_rowids, output_values, empty_row_indicator, unused_reverse_index_map = gen_ragged_array_ops.ragged_fill_empty_rows(value_rowids = ragged_input.value_rowids(), values = ragged_input.values, nrows = ragged_input.nrows(), default_value = default_value) return (ragged_tensor.RaggedTensor.from_value_rowids(values = output_values, value_rowids = output_value_rowids, validate = False), empty_row_indicator)", - "docstring": "Fills empty rows in the input with rank 2 with a default value. This op adds entries with the specified for any row in the input that does not already have a value. The op also returns an indicator vector such that empty_row_indicator[i] = True iff row i was an empty row. Args: ragged_input: A with rank 2. 
default_value: The value to fill for empty rows, with the same type as name: A name prefix for the returned tensors (optional) Returns: ragged_ordered_output: A with all empty rows filled in with . empty_row_indicator: A bool vector indicating whether each input row was empty. Raises: TypeError: If is not a .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", - "ast_data": "FunctionDef name:fill_empty_rows arguments arg:ragged_input arg:default_value arg:name With If Raise raises:TypeError(f'ragged_input must be RaggedTensor, got {type(ragged_input)}') Assign Call call:convert_to_tensor Assign Call call:ragged_fill_empty_rows Return return:yes" - }, - { - "library": "coconut", - "name": "target_info", - "source_code": "@property def target_info(self): return get_target_info(self.target)", - "docstring": "Return information on the current target as a version tuple.", - "type": "method", - "file_path": "coconut\\coconut\\compiler\\compiler.py", - "ast_data": "FunctionDef name:target_info arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "render_gaussian2d", - "source_code": "def render_gaussian2d(mean: Tensor, std: Tensor, size: tuple[int, int], normalized_coordinates: bool = True) -> Tensor: if not (std.dtype = = mean.dtype and std.device = = mean.device): raise TypeError('Expected inputs to have the same dtype and device') height, width = size grid = create_meshgrid(height, width, normalized_coordinates, mean.device) grid = grid.to(mean.dtype) pos_x = grid[..., 0].view(height, width) pos_y = grid[..., 1].view(height, width) dist_x = (pos_x - mean[..., 0, None, None]) ** 2 dist_y = (pos_y - mean[..., 1, None, None]) ** 2 k_x = -0.5 * torch.reciprocal(std[..., 0, None, None]) k_y = -0.5 * torch.reciprocal(std[..., 1, None, None]) exps_x = torch.exp(dist_x * k_x) exps_y = torch.exp(dist_y * k_y) gauss = exps_x * exps_y val_sum = gauss.sum(-2, keepdim = True).sum(-1, keepdim = True) gauss = _safe_zero_division(gauss, val_sum) return gauss", - "docstring": "Render the PDF of a 2D Gaussian distribution. Args: mean: the mean location of the Gaussian to render, :math:. Shape: :math:. std: the standard deviation of the Gaussian to render, :math:. Shape :math:. Should be able to be broadcast with . size: the (height, width) of the output image. 
normalized_coordinates: whether `[-1, 1](*, H, W)`.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\subpix\\dsnt.py", - "ast_data": "FunctionDef name:render_gaussian2d arguments arg:mean type:Tensor arg:std type:Tensor arg:size type:tuple[int, int] arg:normalized_coordinates type:bool If Raise raises:TypeError('Expected inputs to have the same dtype and device') Assign Assign Call call:create_meshgrid Assign Call call:to Assign Call call:view Assign Call call:view Assign Assign Assign Assign Assign Call call:exp Assign Call call:exp Assign Assign Call call:sum Assign Call call:_safe_zero_division Return return:yes" - }, - { - "library": "pytorch", - "name": "benchmark_combo_kernel", - "source_code": "def benchmark_combo_kernel(self, node_list: Sequence[BaseSchedulerNode]) -> tuple[float, float, list[Optional[str]]]: raise NotImplementedError", - "docstring": "Benchmark the list of nodes to combine and return the execution time and memory copy time in milliseconds on randomly generated inputs.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\scheduler.py", - "ast_data": "FunctionDef name:benchmark_combo_kernel arguments arg:self arg:node_list type:Sequence[BaseSchedulerNode] Raise raises:NotImplementedError" - }, - { - "library": "flexx", - "name": "submit", - "source_code": "@event.emitter def submit(self): self.user_done() d = {'old_value': self.text, 'new_value': self.text} return d", - "docstring": "Event emitted when the user strikes the enter or return key (but not when losing focus). Has `` attributes (which are the same).", - "type": "method", - "file_path": "flexx\\flexx\\ui\\widgets\\_lineedit.py", - "ast_data": "FunctionDef name:submit arguments arg:self Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "enable_history_recording", - "source_code": "@contextlib.contextmanager def enable_history_recording() -> Generator[None, None, None]: enabled = torch._C._cuda_isHistoryEnabled() try: if not enabled: torch.cuda.memory._record_memory_history() yield finally: if not enabled: torch.cuda.memory._record_memory_history(None)", - "docstring": "Turns on history recording in the CUDA Caching Allocator", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", - "ast_data": "FunctionDef name:enable_history_recording arguments Assign Call call:_cuda_isHistoryEnabled Try If If" - }, - { - "library": "pytorch", - "name": "aten_op", - "source_code": "def aten_op(self, operator: str, *args, overload_name: str = '', **kwargs): return self.op('aten: : ATen', *args, operator_s = operator, overload_name_s = overload_name, **kwargs)", - "docstring": "Generates an ONNX ATen op node. 
This function is for backward compatibility with the old symbolic functions.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\jit_utils.py", - "ast_data": "FunctionDef name:aten_op arguments arg:self arg:operator type:str vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "pytorch", - "name": "get", - "source_code": "def get(self, key: str, default: Any = None) -> Any: return self.config.get(key, default)", - "docstring": "Return the value for ``.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", - "ast_data": "FunctionDef name:get arguments arg:self arg:key type:str arg:default type:Any Return return:yes" - }, - { - "library": "cryptography", - "name": "build_ffi_for_binding", - "source_code": "def build_ffi_for_binding(module_name: str, module_prefix: str, modules: list[str]): types = [] includes = [] functions = [] customizations = [] for name in modules: __import__(module_prefix + name) module = sys.modules[module_prefix + name] types.append(module.TYPES) functions.append(module.FUNCTIONS) includes.append(module.INCLUDES) customizations.append(module.CUSTOMIZATIONS) verify_source = '\\n'.join(includes + customizations) return build_ffi(module_name, cdef_source = '\\n'.join(types + functions), verify_source = verify_source)", - "docstring": "Modules listed in ``: A string containing arbitrary top-level C code, this can be used to do things like test for a define and provide an alternate implementation based on that.", - "type": "function", - "file_path": "cryptography\\src\\_cffi_src\\utils.py", - "ast_data": "FunctionDef name:build_ffi_for_binding arguments arg:module_name type:str arg:module_prefix type:str arg:modules type:list[str] Assign Assign Assign Assign For Assign Assign Call call:join Return return:yes" - }, - { - "library": "django", - "name": "hasz", - "source_code": "@property def hasz(self): return self._z", - "docstring": "Return whether this coordinate sequence is 3D. This property value is inherited from the parent Geometry.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", - "ast_data": "FunctionDef name:hasz arguments arg:self Return return:yes" - }, - { - "library": "feincms", - "name": "permalink", - "source_code": "def permalink(func): def inner(*args, **kwargs): return app_reverse(*func(*args, **kwargs)) return wraps(func)(inner)", - "docstring": "Decorator that calls app_reverse() Use this instead of standard django.db.models.permalink if you want to integrate the model through ApplicationContent. The wrapped function must return 4 instead of 3 arguments:: class MyModel(models.Model): @appmodels.permalink def get_absolute_url(self): return ('myapp.urls', 'model_detail', (), {'slug': self.slug})", - "type": "function", - "file_path": "feincms\\feincms\\content\\application\\models.py", - "ast_data": "FunctionDef name:permalink arguments arg:func FunctionDef name:inner arguments vararg:args kwarg:kwargs Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "transform", - "source_code": "def transform(self, X): return self._transform(X)", - "docstring": "Return class labels or probabilities for X for each estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. 
Returns ------- y_preds : ndarray of shape (n_samples, n_estimators) or (n_samples, n_classes * n_estimators) Prediction outputs for each estimator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:X Return return:yes" - }, - { - "library": "pytorch", - "name": "prepare_for_propagation_comparison", - "source_code": "def prepare_for_propagation_comparison(model: GraphModule) -> GraphModule: model = copy.deepcopy(model) for n in model.graph.nodes: if CUSTOM_KEY not in n.meta or NUMERIC_DEBUG_HANDLE_KEY not in n.meta[CUSTOM_KEY]: continue numeric_debug_handle = n.meta[CUSTOM_KEY][NUMERIC_DEBUG_HANDLE_KEY] _insert_logger(model, n, numeric_debug_handle) model.recompile() return model", - "docstring": "Add output loggers to node that has numeric_debug_handle Args: model (GraphModule): original model Returns: a model with output loggers for all nodes that has numeric_debug_handle_id", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\pt2e\\_numeric_debugger.py", - "ast_data": "FunctionDef name:prepare_for_propagation_comparison arguments arg:model type:GraphModule Assign Call call:deepcopy For If BoolOp Compare op:NotIn Compare op:NotIn Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "reset_accumulated_host_memory_stats", - "source_code": "def reset_accumulated_host_memory_stats() -> None: return torch._C._cuda_resetAccumulatedHostMemoryStats()", - "docstring": "Reset the \"accumulated\" (historical) stats tracked by the host memory allocator. See :func: for details. Accumulated stats correspond to the and keys in each individual stat dict.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\memory.py", - "ast_data": "FunctionDef name:reset_accumulated_host_memory_stats arguments Return return:yes" - }, - { - "library": "scipy", - "name": "get_thunk_type_set", - "source_code": "def get_thunk_type_set(): it_types = [] i_types = [] j = 0 getter_code = ' if (0) {}' for I_typenum, I_type in I_TYPES: piece = '\\n else if (I_typenum = = %(I_typenum)s) {\\n if (T_typenum = = -1) { return %(j)s; }' getter_code + = piece % dict(I_typenum = I_typenum, j = j) i_types.append((j, I_typenum, None, I_type, None)) j + = 1 for T_typenum, T_type in T_TYPES: piece = '\\n else if (T_typenum = = %(T_typenum)s) { return %(j)s; }' getter_code + = piece % dict(T_typenum = T_typenum, j = j) it_types.append((j, I_typenum, T_typenum, I_type, T_type)) j + = 1 getter_code + = '\\n }' return (i_types, it_types, GET_THUNK_CASE_TEMPLATE % dict(content = getter_code))", - "docstring": "Get a list containing cartesian product of data types, plus a getter routine. Returns ------- i_types : list [(j, I_typenum, None, I_type, None), ...] Pairing of index type numbers and the corresponding C++ types, and an unique index . This is for routines that are parameterized only by I but not by T. it_types : list [(j, I_typenum, T_typenum, I_type, T_type), ...] Same as , but for routines parameterized both by T and I. 
getter_code : str C++ code for a function that takes I_typenum, T_typenum and returns the unique index corresponding to the lists, or -1 if no match was found.", - "type": "function", - "file_path": "scipy\\scipy\\sparse\\_generate_sparsetools.py", - "ast_data": "FunctionDef name:get_thunk_type_set arguments Assign Assign Assign Assign For Assign For Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_longitude_grid_ends", - "source_code": "def set_longitude_grid_ends(self, degrees): self._longitude_cap = np.deg2rad(degrees) self._xaxis_pretransform.clear().scale(1.0, self._longitude_cap * 2.0).translate(0.0, -self._longitude_cap)", - "docstring": "Set the latitude(s) at which to stop drawing the longitude grids.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\projections\\geo.py", - "ast_data": "FunctionDef name:set_longitude_grid_ends arguments arg:self arg:degrees Assign Call call:deg2rad" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, bytes_per_pack = 0, timeout_seconds = None): pass", - "docstring": "Creates a CollectiveHints. Args: bytes_per_pack: a non-negative integer. Breaks collective operations into packs of certain size. If it's zero, the value is determined automatically. This only applies to all-reduce with currently. timeout_seconds: a float or None, timeout in seconds. If not None, the collective raises if it takes longer than this timeout. This can be useful when debugging hanging issues. This should only be used for debugging since it creates a new thread for each collective, i.e. an overhead of more threads. This only works for . Raises: ValueError: When arguments have invalid value.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\collective_util.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:bytes_per_pack arg:timeout_seconds" - }, - { - "library": "tensorflow", - "name": "tf_record_iterator", - "source_code": "@tf_export(v1 = ['io.tf_record_iterator', 'python_io.tf_record_iterator']) @deprecation.deprecated(date = None, instructions = 'Use eager execution and: \\n`tf.data.TFRecordDataset(path)`') def tf_record_iterator(path, options = None): compression_type = TFRecordOptions.get_compression_type_string(options) return _pywrap_record_io.RecordIterator(path, compression_type)", - "docstring": "An iterator that read the records from a TFRecords file. Args: path: The path to the TFRecords file. options: (optional) A TFRecordOptions object. Returns: An iterator of serialized TFRecords. Raises: IOError: If cannot be opened for reading.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py", - "ast_data": "FunctionDef name:tf_record_iterator arguments arg:path arg:options Call call:tf_export Call call:deprecated Assign Call call:get_compression_type_string Return return:yes" - }, - { - "library": "matplotlib", - "name": "interval_contains", - "source_code": "def interval_contains(interval, val): a, b = interval if a > b: a, b = (b, a) return a < = val < = b", - "docstring": "Check, inclusively, whether an interval includes a given value. Parameters ---------- interval : (float, float) The endpoints of the interval. val : float Value to check is within interval. 
Returns ------- bool Whether *val* is within the *interval*.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:interval_contains arguments arg:interval arg:val Assign If Compare op:Gt Assign Return return:yes" - }, - { - "library": "scipy", - "name": "resize", - "source_code": "def resize(self, shape): raise NotImplementedError(f'{type(self).__name__}.resize is not implemented')", - "docstring": "Resize the array/matrix in-place to dimensions given by `numpy.ndarray.resizenumpy.resize`. Here, the same data will be maintained at each index before and after reshape, if that index is within the new bounds. In numpy, resizing maintains contiguity of the array, moving elements around in the logical array but not within a flattened representation. We give no guarantees about whether the underlying data attributes (arrays, etc.) will be modified in place or replaced with new objects.", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_base.py", - "ast_data": "FunctionDef name:resize arguments arg:self arg:shape Raise raises:NotImplementedError(f'{type(self).__name__}.resize is not implemented')" - }, - { - "library": "tensorflow", - "name": "path_to_string", - "source_code": "def path_to_string(path): if isinstance(path, os.PathLike): return os.fspath(path) return path", - "docstring": "Convert objects to their string representation. If given a non-string typed path object, converts it to its string representation. If the object passed to is not among the above, then it is returned unchanged. This allows e.g. passthrough of file objects through this function. Args: path: object that represents a path Returns: A string representation of the path argument, if Python support exists.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\io_utils.py", - "ast_data": "FunctionDef name:path_to_string arguments arg:path If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "update_discrepancy", - "source_code": "def update_discrepancy(x_new: 'npt.ArrayLike', sample: 'npt.ArrayLike', initial_disc: DecimalNumber) -> float: sample = np.asarray(sample, dtype = np.float64, order = 'C') x_new = np.asarray(x_new, dtype = np.float64, order = 'C') if not sample.ndim = = 2: raise ValueError('Sample is not a 2D array') if sample.max() > 1.0 or sample.min() < 0.0: raise ValueError('Sample is not in unit hypercube') if not x_new.ndim = = 1: raise ValueError('x_new is not a 1D array') if not (np.all(x_new > = 0) and np.all(x_new < = 1)): raise ValueError('x_new is not in unit hypercube') if x_new.shape[0] ! = sample.shape[1]: raise ValueError('x_new and sample must be broadcastable') return _cy_wrapper_update_discrepancy(x_new, sample, initial_disc)", - "docstring": "Update the centered discrepancy with a new sample. Parameters ---------- x_new : array_like (1, d) The new sample to add in . sample : array_like (n, d) The initial sample. initial_disc : float Centered discrepancy of the . Returns ------- discrepancy : float Centered discrepancy of the sample composed of and . Examples -------- We can also compute iteratively the discrepancy by using ``. 
>>> import numpy as np >>> from scipy.stats import qmc >>> space = np.array([[1, 3], [2, 6], [3, 2], [4, 5], [5, 1], [6, 4]]) >>> l_bounds = [0.5, 0.5] >>> u_bounds = [6.5, 6.5] >>> space = qmc.scale(space, l_bounds, u_bounds, reverse=True) >>> disc_init = qmc.discrepancy(space[:-1], iterative=True) >>> disc_init 0.04769081147119336 >>> qmc.update_discrepancy(space[-1], space[:-1], disc_init) 0.008142039609053513", "type": "function", "file_path": "scipy\\scipy\\stats\\_qmc.py", "ast_data": "FunctionDef name:update_discrepancy arguments arg:x_new type:'npt.ArrayLike' arg:sample type:'npt.ArrayLike' arg:initial_disc type:DecimalNumber Assign Call call:asarray Assign Call call:asarray If Raise raises:ValueError('Sample is not a 2D array') If BoolOp Compare op:Gt Compare op:Lt Raise raises:ValueError('Sample is not in unit hypercube') If Raise raises:ValueError('x_new is not a 1D array') If Raise raises:ValueError('x_new is not in unit hypercube') If Compare op:NotEq Raise raises:ValueError('x_new and sample must be broadcastable') Return return:yes" }, { "library": "pytorch", "name": "__init__", "source_code": "def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1): super().__init__() self.cost_class = cost_class self.cost_bbox = cost_bbox self.cost_giou = cost_giou assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, 'all costs cant be 0'", "docstring": "Creates the matcher Params: cost_class: This is the relative weight of the classification error in the matching cost cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost", "type": "method", "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py", "ast_data": "FunctionDef name:__init__ arguments arg:self arg:cost_class type:float arg:cost_bbox type:float arg:cost_giou type:float Assign Assign Assign" }, { "library": "sphinx", "name": "split", "source_code": "def split(self, input: str) -> list[str]: return self._word_re.findall(input)", "docstring": "This method splits a sentence into words. Default splitter splits input at white spaces, which should be enough for most languages except CJK languages.", "type": "method", "file_path": "sphinx\\sphinx\\search\\__init__.py", "ast_data": "FunctionDef name:split arguments arg:self arg:input type:str Return return:yes" }, { "library": "pytorch", "name": "set_fusion_strategy", "source_code": "def set_fusion_strategy(strategy: list[tuple[str, int]]): return torch._C._jit_set_fusion_strategy(strategy)", "docstring": "Set the type and number of specializations that can occur during fusion. Usage: provide a list of pairs (type, depth) where type is one of \"STATIC\" or \"DYNAMIC\" and depth is an integer. Behavior - static vs dynamic: In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined based on some initial profiling runs. In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple shapes are possible. In both cases, we also recompile on new striding behavior, device, or dtype. Behavior - fallback functions & depth: When an input doesn't match the format required by the specialized compiled op, it will run a fallback function. Fallback functions are recursively compiled and specialized based on the observed tensor shapes. 
Since compilation can be slow, the \"depth\" parameter is provided to limit the number of specializations that can be compiled, before giving up on recompiling and falling back to a completely un-fused, un-specialized implementation. The list of (type, depth) pairs controls the type of specializations and the number of specializations. For example: [(\"STATIC\", 2), (\"DYNAMIC\", 2)] indicates that the first two specializations will use static fusions, the following two specializations will use dynamic fusion, and any inputs that satisfy none of the 4 options will run an unfused implementation. NB: in the future, as more fusion backends are added there may be more granular apis for specific fusers.", "type": "function", "file_path": "pytorch\\torch\\jit\\_fuser.py", "ast_data": "FunctionDef name:set_fusion_strategy arguments arg:strategy type:list[tuple[str, int]] Return return:yes" }, { "library": "tensorflow", "name": "parameterized_truncated_normal", "source_code": "def parameterized_truncated_normal(shape, means = 0.0, stddevs = 1.0, minvals = -2.0, maxvals = 2.0, dtype = dtypes.float32, seed = None, name = None): with ops.name_scope(name, 'parameterized_truncated_normal', [shape, means, stddevs, minvals, maxvals]) as name: shape_tensor = shape_util.shape_tensor(shape) means_tensor = ops.convert_to_tensor(means, dtype = dtype, name = 'means') stddevs_tensor = ops.convert_to_tensor(stddevs, dtype = dtype, name = 'stddevs') minvals_tensor = ops.convert_to_tensor(minvals, dtype = dtype, name = 'minvals') maxvals_tensor = ops.convert_to_tensor(maxvals, dtype = dtype, name = 'maxvals') seed1, seed2 = random_seed.get_seed(seed) rnd = gen_random_ops.parameterized_truncated_normal(shape_tensor, means_tensor, stddevs_tensor, minvals_tensor, maxvals_tensor, seed = seed1, seed2 = seed2) shape_util.maybe_set_static_shape(rnd, shape) return rnd", "docstring": "Outputs random values from a truncated normal distribution. The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than 2 standard deviations from the mean are dropped and re-picked. Args: shape: A 1-D integer Tensor or Python array. The shape of the output tensor. means: A 0-D Tensor or Python value of type . The mean of the truncated normal distribution. stddevs: A 0-D Tensor or Python value of type . The standard deviation of the truncated normal distribution. minvals: A 0-D Tensor or Python value of type . The minimum value of the truncated normal distribution. maxvals: A 0-D Tensor or Python value of type . The maximum value of the truncated normal distribution. dtype: The type of the output. seed: A Python integer. Used to create a random seed for the distribution. See for behavior. name: A name for the operation (optional). 
Returns: A tensor of the specified shape filled with random truncated normal values.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\random_ops.py", - "ast_data": "FunctionDef name:parameterized_truncated_normal arguments arg:shape arg:means arg:stddevs arg:minvals arg:maxvals arg:dtype arg:seed arg:name With Assign Call call:shape_tensor Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign Call call:get_seed Assign Call call:parameterized_truncated_normal Return return:yes" - }, - { - "library": "mongo", - "name": "get_default_database", - "source_code": "def get_default_database(self, default: Optional[str] = None, codec_options: Optional[CodecOptions[_DocumentTypeArg]] = None, read_preference: Optional[_ServerMode] = None, write_concern: Optional[WriteConcern] = None, read_concern: Optional[ReadConcern] = None) -> database.AsyncDatabase[_DocumentType]: if self._default_database_name is None and default is None: raise ConfigurationError('No default database name defined or provided.') name = cast(str, self._default_database_name or default) return database.AsyncDatabase(self, name, codec_options, read_preference, write_concern, read_concern)", - "docstring": "Get the database named in the MongoDB connection URI. >>> uri = 'mongodb://host/my_database' >>> client = AsyncMongoClient(uri) >>> db = client.get_default_database() >>> assert db.name == 'my_database' >>> db = client.get_database() >>> assert db.name == 'my_database' Useful in scripts where you want to choose which database to use based only on the URI in a configuration file. :param default: the database name to use if no database name was provided in the URI. :param codec_options: An instance of :class:. If `codec_optionsAsyncMongoClientread_preferenceAsyncMongoClient~pymongo.read_preferences~pymongo.write_concern.WriteConcernwrite_concernAsyncMongoClient~pymongo.read_concern.ReadConcernread_concernAsyncMongoClientget_database` instead.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\mongo_client.py", - "ast_data": "FunctionDef name:get_default_database arguments arg:self arg:default type:Optional[str] arg:codec_options type:Optional[CodecOptions[_DocumentTypeArg]] arg:read_preference type:Optional[_ServerMode] arg:write_concern type:Optional[WriteConcern] arg:read_concern type:Optional[ReadConcern] If BoolOp Compare op:Is Compare op:Is Raise raises:ConfigurationError('No default database name defined or provided.') Assign Call call:cast Return return:yes" - }, - { - "library": "algorithms", - "name": "alice_public_key", - "source_code": "def alice_public_key(a_pr_k, a, p): return pow(a, a_pr_k) % p", - "docstring": "Alice calculate her public key with her private key. This is open to public", - "type": "function", - "file_path": "algorithms\\algorithms\\maths\\diffie_hellman_key_exchange.py", - "ast_data": "FunctionDef name:alice_public_key arguments arg:a_pr_k arg:a arg:p Return return:yes" - }, - { - "library": "pandas", - "name": "f", - "source_code": "def f(self, node, *args, **kwargs): return partial(op_class, op_symbol, *args, **kwargs)", - "docstring": "Return a partial function with an Op subclass with an operator already passed. 
Returns ------- callable", - "type": "function", - "file_path": "pandas\\pandas\\core\\computation\\expr.py", - "ast_data": "FunctionDef name:f arguments arg:self arg:node vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "pytorch", - "name": "__call__", - "source_code": "def __call__(self, graph_module: torch.fx.GraphModule, args) -> torch.fx.GraphModule: if self._options.use_aot_autograd: from functorch.compile import min_cut_rematerialization_partition from torch._dynamo.backends.common import aot_autograd return aot_autograd(fw_compiler = self.compile, partition_fn = min_cut_rematerialization_partition, decompositions = self._resolved_onnx_exporter_options.decomposition_table)(graph_module, args) return self.compile(graph_module, args)", - "docstring": "If `auto_autograd` method is invoked directly.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\onnxruntime.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:graph_module type:torch.fx.GraphModule arg:args If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "peek", - "source_code": "def peek(self, index, name = None): if name is None: name = '%s_peek' % self._name fn = lambda: gen_data_flow_ops.stage_peek(index, dtypes = self._dtypes, shared_name = self._name, name = name, capacity = self._capacity, memory_limit = self._memory_limit) return self.__internal_get(fn, name)", - "docstring": "Peeks at an element in the staging area. If the staging area is too small to contain the element at the specified index, it will block until enough elements are inserted to complete the operation. The placement of the returned tensor will be determined by the current device scope when this function is called. Args: index: The index of the tensor within the staging area to look up. name: A name for the operation (optional). Returns: The tuple of tensors that was gotten.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", - "ast_data": "FunctionDef name:peek arguments arg:self arg:index arg:name If Compare op:Is Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "synchronize", - "source_code": "def synchronize(self) -> None: torch._C._mps_synchronizeEvent(self.__eventId)", - "docstring": "Waits until the completion of all work currently captured in this event. This prevents the CPU thread from proceeding until the event completes.", - "type": "method", - "file_path": "pytorch\\torch\\mps\\event.py", - "ast_data": "FunctionDef name:synchronize arguments arg:self" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, learning_rate: Union[float, Callable[[], float]] = 0.01, use_gradient_accumulation: bool = True, clip_weight_min: Optional[float] = None, clip_weight_max: Optional[float] = None, weight_decay_factor: Optional[float] = None, multiply_weight_decay_factor_by_learning_rate: Optional[bool] = None, clipvalue: Optional[ClipValueType] = None, low_dimensional_packing_status: bool = False): super().__init__(learning_rate, use_gradient_accumulation, clip_weight_min, clip_weight_max, weight_decay_factor, multiply_weight_decay_factor_by_learning_rate, clipvalue, None, low_dimensional_packing_status)", - "docstring": "Optimization parameters for stochastic gradient descent. Args: learning_rate: The learning rate. It should be a floating point value or a callable taking no arguments for a dynamic learning rate. 
use_gradient_accumulation: setting this to makes embedding gradients calculation less accurate but faster. clip_weight_min: the minimum value to clip by; None means -infinity. clip_weight_max: the maximum value to clip by; None means +infinity. weight_decay_factor: amount of weight decay to apply; None means that the weights are not decayed. Weights are decayed by multiplying the weight by this factor each step. multiply_weight_decay_factor_by_learning_rate: if true, is multiplied by the current learning rate. clipvalue: Controls clipping of the gradient. Set to either a single positive scalar value to get clipping or a tiple of scalar values (min, max) to set a separate maximum or minimum. If one of the two entries is None, then there will be no clipping that direction. Note if this is set, you may see a decrease in performance as gradient accumulation will be enabled (it is normally off for SGD as it has no affect on accuracy). See 'tensorflow/core/protobuf/tpu/optimization_parameters.proto' for more information on gradient accumulation and its impact on tpu embeddings. low_dimensional_packing_status: Status of the low-dimensional embedding packing optimization controls whether to optimize the packing of 1-dimensional, 2-dimensional, and 4-dimensional embedding tables in memory.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2_utils.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:learning_rate type:Union[float, Callable[[], float]] arg:use_gradient_accumulation type:bool arg:clip_weight_min type:Optional[float] arg:clip_weight_max type:Optional[float] arg:weight_decay_factor type:Optional[float] arg:multiply_weight_decay_factor_by_learning_rate type:Optional[bool] arg:clipvalue type:Optional[ClipValueType] arg:low_dimensional_packing_status type:bool" - }, - { - "library": "pytorch", - "name": "requires_grad_", - "source_code": "def requires_grad_(self, requires_grad: bool = True) -> Self: for p in self.parameters(): p.requires_grad_(requires_grad) return self", - "docstring": "Change if autograd should record operations on parameters in this module. This method sets the parameters' :attr: attributes in-place. This method is helpful for freezing part of the module for finetuning or training parts of a model individually (e.g., GAN training). See :ref: for a comparison between and several similar mechanisms that may be confused with it. Args: requires_grad (bool): whether autograd should record operations on parameters in this module. Default: ``. Returns: Module: self", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:requires_grad_ arguments arg:self arg:requires_grad type:bool For Call call:parameters Return return:yes" - }, - { - "library": "feincms", - "name": "ajax_editable_boolean_cell", - "source_code": "def ajax_editable_boolean_cell(item, attr, text = '', override = None): if text: text = ' (%s)' % text if override is not None: a = [django_boolean_icon(override, text), text] else: value = getattr(item, attr) a = ['' % (item.pk, attr, 'checked = \"checked\"' if value else '')] a.insert(0, '
' % (attr, item.pk)) a.append('
') return mark_safe(''.join(a))", - "docstring": "Generate a html snippet for showing a boolean value on the admin page. Item is an object, attr is the attribute name we should display. Text is an optional explanatory text to be included in the output. This function will emit code to produce a checkbox input with its state corresponding to the item.attr attribute if no override value is passed. This input is wired to run a JS ajax updater to toggle the value. If override is passed in, ignores the attr attribute and returns a static image for the override boolean with no user interaction possible (useful for \"disabled and you can't change it\" situations).", - "type": "function", - "file_path": "feincms\\feincms\\admin\\tree_editor.py", - "ast_data": "FunctionDef name:ajax_editable_boolean_cell arguments arg:item arg:attr arg:text arg:override If Assign If Compare op:IsNot Assign Assign Call call:getattr Assign Return return:yes" - }, - { - "library": "salmon", - "name": "attempt_decoding", - "source_code": "def attempt_decoding(charset, dec): try: if isinstance(dec, str): return dec else: return dec.decode(charset) except (UnicodeError, LookupError): return guess_encoding_and_decode(charset, dec)", - "docstring": "Attempts to decode bytes into unicode, calls guess_encoding_and_decode if the given charset is wrong.", - "type": "function", - "file_path": "salmon\\salmon\\encoding.py", - "ast_data": "FunctionDef name:attempt_decoding arguments arg:charset arg:dec Try If Call call:isinstance Return return:yes Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "SymIntEqByExpr", - "source_code": "class SymIntEqByExpr: val: Union[torch.SymInt, int] def __init__(self, val: Union[torch.SymInt, int]) -> None: self.val = val def __repr__(self) -> str: return repr(self.val) def _extract(self) -> sympy.Expr: if isinstance(self.val, torch.SymInt): return self.val.node.expr else: return sympy.Integer(self.val) def __eq__(self, other: object) -> bool: assert isinstance(other, SymIntEqByExpr) if type(self.val) is int and type(other.val) is int: return self.val = = other.val return self._extract() = = other._extract() def __hash__(self) -> int: return hash(self._extract())", - "docstring": "This is a wrapper around SymInt which has alternative semantics for equality. Specifically, instead of erroring or guarding, we instead will hash/compare equality based on the underlying sympy expression; e.g., s0 and s1 will always compare as False. 
NB: This does NOT do fancy analysis that maybe_evaluate_static does; we can only reason through equalities that occur because two expressions canonicalize to the same expression via regular simplification.", "type": "class", "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", "ast_data": "ClassDef name:SymIntEqByExpr FunctionDef name:__init__ arguments arg:self arg:val type:Union[torch.SymInt, int] Assign FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:_extract arguments arg:self If Call call:isinstance Return return:yes Return return:yes FunctionDef name:__eq__ arguments arg:self arg:other type:object If BoolOp Compare op:Is Compare op:Is Return return:yes Return return:yes FunctionDef name:__hash__ arguments arg:self Return return:yes" }, { "library": "pytorch", "name": "get_logdir", "source_code": "def get_logdir(self): return self.log_dir", "docstring": "Return the directory where event files will be written.", "type": "method", "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", "ast_data": "FunctionDef name:get_logdir arguments arg:self Return return:yes" }, { "library": "pytorch", "name": "save_storage", "source_code": "def save_storage(self, storage: Storage, offset: int = 0) -> None: assert self.handle is not None, 'Cannot save data to a file that is not registered.' torch._C._gds_save_storage(self.handle, storage, offset)", "docstring": "Saves data from the storage into the file. This is a wrapper around ``. Args: storage (Storage): Storage to save data from. offset (int, optional): Offset into the file to start saving to. (Default: 0)", "type": "method", "file_path": "pytorch\\torch\\cuda\\gds.py", "ast_data": "FunctionDef name:save_storage arguments arg:self arg:storage type:Storage arg:offset type:int" }, { "library": "numpy", "name": "write_release", "source_code": "@task def write_release(options): rdir = options.installers.releasedir write_release_task(options, os.path.join(rdir, 'README'))", "docstring": "Write the README files. Two README files are generated from the release notes, one in `` decorator.", "type": "function", "file_path": "numpy\\pavement.py", "ast_data": "FunctionDef name:write_release arguments arg:options Assign" }, { "library": "pandas", "name": "is_monotonic_increasing", "source_code": "@property def is_monotonic_increasing(self) -> bool: return self._engine.is_monotonic_increasing", "docstring": "Return a boolean if the values are equal or increasing. Returns ------- bool See Also -------- Index.is_monotonic_decreasing : Check if the values are equal or decreasing. 
Examples -------- >>> pd.Index([1, 2, 3]).is_monotonic_increasing True >>> pd.Index([1, 2, 2]).is_monotonic_increasing True >>> pd.Index([1, 3, 2]).is_monotonic_increasing False", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\base.py", - "ast_data": "FunctionDef name:is_monotonic_increasing arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "intersects", - "source_code": "def intersects(self, other): return self._topology(capi.ogr_intersects, other)", - "docstring": "Return True if this geometry intersects with the other.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", - "ast_data": "FunctionDef name:intersects arguments arg:self arg:other Return return:yes" - }, - { - "library": "pytorch", - "name": "is_bf16_supported", - "source_code": "def is_bf16_supported(including_emulation: bool = True): if torch.version.hip: return True if not is_available(): return False device = torch.cuda.current_device() cuda_version = torch.version.cuda if cuda_version is not None and torch.cuda.get_device_properties(device).major > = 8: return True if not including_emulation: return False return _check_bf16_tensor_supported(device)", - "docstring": "Return a bool indicating if the current CUDA/ROCm device supports dtype bfloat16.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\__init__.py", - "ast_data": "FunctionDef name:is_bf16_supported arguments arg:including_emulation type:bool If Return return:yes If Return return:yes Assign Call call:current_device Assign If BoolOp Compare op:IsNot Compare op:GtE Return return:yes If Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict", - "source_code": "@available_if(_check_novelty_predict) def predict(self, X = None): return self._predict(X)", - "docstring": "Predict the labels (1 inlier, -1 outlier) of X according to LOF. **Only available for novelty detection (when novelty is set to True).** This method allows to generalize prediction to *new observations* (not in the training set). Note that the result of ``. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. Returns ------- is_inlier : ndarray of shape (n_samples,) Returns -1 for anomalies/outliers and +1 for inliers.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X Call call:available_if Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_parse_math", - "source_code": "def get_parse_math(self): return self._parse_math", - "docstring": "Return whether mathtext parsing is considered for this .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:get_parse_math arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_directory_v2", - "source_code": "@tf_export('io.gfile.isdir') def is_directory_v2(path): try: return _pywrap_file_io.IsDirectory(compat.path_to_bytes(path)) except errors.OpError: return False", - "docstring": "Returns whether the path is a directory or not. 
Args: path: string, path to a potential directory Returns: True, if the path is a directory; False otherwise", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:is_directory_v2 arguments arg:path Call call:tf_export Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "authlib", - "name": "register_token_generator", - "source_code": "def register_token_generator(self, grant_type, func): self._token_generators[grant_type] = func", - "docstring": "Register a function as token generator for the given ``:: def generate_bearer_token( grant_type, client, user=None, scope=None, expires_in=None, include_refresh_token=True, ): token = {\"token_type\": \"Bearer\", \"access_token\": ...} if include_refresh_token: token[\"refresh_token\"] = ... ... return token authorization_server.register_token_generator( \"default\", generate_bearer_token ) If you register a generator for a certain grant type, that generator will only works for the given grant type:: authorization_server.register_token_generator( \"client_credentials\", generate_bearer_token, ) :param grant_type: string name of the grant type :param func: a function to generate token", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", - "ast_data": "FunctionDef name:register_token_generator arguments arg:self arg:grant_type arg:func Assign" - }, - { - "library": "pytorch", - "name": "sequential_split", - "source_code": "def sequential_split(gm: torch.fx.GraphModule, node_call_back: Callable[[torch.fx.Node], Union[torch.fx.Node, bool]]) -> torch.fx.GraphModule: from torch.fx.passes.split_module import split_module split_map = {} split_id = 0 for node in gm.graph.nodes: if node_call_back(node): split_id + = 1 split_map[node] = split_id new_gm = split_module(gm, gm, lambda node: split_map[node], keep_original_order = True, keep_original_node_name = True) new_gm.graph._codegen = gm.graph._codegen new_gm.recompile() return new_gm", - "docstring": "sequential_split creates a new graph module that splits the input graph module into multiple submodules based on the node_call_back. It doesn't mutate the input graph module. The node_call_back should return True if the node is a delimiter. 
Delimiter will be the first node in the next submodule.", - "type": "function", - "file_path": "pytorch\\torch\\_export\\utils.py", - "ast_data": "FunctionDef name:sequential_split arguments arg:gm type:torch.fx.GraphModule arg:node_call_back type:Callable[[torch.fx.Node], Union[torch.fx.Node, bool]] Assign Assign For If Call call:node_call_back Assign Assign Call call:split_module Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "function_callback", - "source_code": "def function_callback(self, function): graph_id = self._get_context_id(function.graph) with self._context_lock: self._function_to_graph_id[function] = graph_id", - "docstring": "A callback to be called on creation of ConcreteFunctions.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\dumping_callback.py", - "ast_data": "FunctionDef name:function_callback arguments arg:self arg:function Assign Call call:_get_context_id With Assign" - }, - { - "library": "tensorflow", - "name": "as_graph_element", - "source_code": "def as_graph_element(self, obj, allow_tensor = True, allow_operation = True) -> Union[tensor_lib.Tensor, 'Operation']: if self._finalized: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) with self._lock: return self._as_graph_element_locked(obj, allow_tensor, allow_operation)", - "docstring": "Returns the object referred to by , as an or . This function validates that represents an element of this graph, and gives an informative error message if it is not. This function is the canonical way to get/validate an object of one of the allowed types from an external argument reference in the Session API. This method may be called concurrently from multiple threads. Args: obj: A , an , or the name of a tensor or operation. Can also be any object with an method that returns a value of one of these types. Note: will be called inside the graph's lock and so may not modify the graph. allow_tensor: If true, may refer to a . allow_operation: If true, may refer to an . Returns: The or in the Graph corresponding to . Raises: TypeError: If is not a type we support attempting to convert to types. ValueError: If is of an appropriate type but invalid. For example, an invalid string. 
KeyError: If is not an object in the graph.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:as_graph_element arguments arg:self arg:obj arg:allow_tensor arg:allow_operation If Return return:yes With Return return:yes" - }, - { - "library": "mongo", - "name": "validate_integer", - "source_code": "def validate_integer(option: str, value: Any) -> int: if isinstance(value, int): return value elif isinstance(value, str): try: return int(value) except ValueError: raise ValueError(f'The value of {option} must be an integer') from None raise TypeError(f'Wrong type for {option}, value must be an integer, not {type(value)}')", - "docstring": "Validates that 'value' is an integer (or basestring representation).", - "type": "function", - "file_path": "mongo\\pymongo\\common.py", - "ast_data": "FunctionDef name:validate_integer arguments arg:option type:str arg:value type:Any If Call call:isinstance Return return:yes If Call call:isinstance Try Return return:yes ExceptHandler Raise raises:ValueError(f'The value of {option} must be an integer') Raise raises:TypeError(f'Wrong type for {option}, value must be an integer, not {type(value)}')" - }, - { - "library": "coconut", - "name": "highlight", - "source_code": "def highlight(code, force = False): from coconut.terminal import logger if force or (logger.enable_colors(sys.stdout) and logger.enable_colors(sys.stderr)): try: from coconut.highlighter import highlight_coconut_for_terminal except ImportError: logger.log_exc() else: code_base, code_white = split_trailing_whitespace(code) return highlight_coconut_for_terminal(code_base).rstrip() + code_white return code", - "docstring": "Attempt to highlight Coconut code for the terminal.", - "type": "function", - "file_path": "coconut\\coconut\\util.py", - "ast_data": "FunctionDef name:highlight arguments arg:code arg:force If BoolOp BoolOp Call call:enable_colors Call call:enable_colors Try ExceptHandler Assign Call call:split_trailing_whitespace Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "read_array_header_2_0", - "source_code": "@set_module('numpy.lib.format') def read_array_header_2_0(fp, max_header_size = _MAX_HEADER_SIZE): return _read_array_header(fp, version = (2, 0), max_header_size = max_header_size)", - "docstring": "Read an array header from a filelike object using the 2.0 file format version. This will leave the file object located just after the header. Parameters ---------- fp : filelike object A file object or something with a method like a file. max_header_size : int, optional Maximum allowed size of the header. Large headers may not be safe to load securely and thus require explicitly passing a larger value. See :py:func: for details. Returns ------- shape : tuple of int The shape of the array. fortran_order : bool The array data will be written out directly if it is either C-contiguous or Fortran-contiguous. Otherwise, it will be made contiguous before writing it out. dtype : dtype The dtype of the file's data. 
Raises ------ ValueError If the data is invalid.", - "type": "function", - "file_path": "numpy\\numpy\\lib\\_format_impl.py", - "ast_data": "FunctionDef name:read_array_header_2_0 arguments arg:fp arg:max_header_size Call call:set_module Return return:yes" - }, - { - "library": "pandas", - "name": "hasnans", - "source_code": "@cache_readonly def hasnans(self) -> bool: if self._can_hold_na: return bool(self._isnan.any()) else: return False", - "docstring": "Return True if there are any NaNs. Enables various performance speedups. Returns ------- bool See Also -------- Index.isna : Detect missing values. Index.dropna : Return Index without NA/NaN values. Index.fillna : Fill NA/NaN values with the specified value. Examples -------- >>> s = pd.Series([1, 2, 3], index=[\"a\", \"b\", None]) >>> s a 1 b 2 None 3 dtype: int64 >>> s.index.hasnans True", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\base.py", - "ast_data": "FunctionDef name:hasnans arguments arg:self If Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "read_char_array", - "source_code": "def read_char_array(self, hdr): arr = self.read_sub_array(hdr).astype(np.uint8) S = arr.tobytes().decode('latin-1') return np.ndarray(shape = hdr.dims, dtype = np.dtype('U1'), buffer = np.array(S)).copy()", - "docstring": "latin-1 text matrix (char matrix) reader Parameters ---------- hdr : `hdr`", - "type": "method", - "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", - "ast_data": "FunctionDef name:read_char_array arguments arg:self arg:hdr Assign Call call:astype Assign Call call:decode Return return:yes" - }, - { - "library": "kornia", - "name": "__init__", - "source_code": "def __init__(self, num_dims: int, num_freqs: int, log_space: bool = False) -> None: super().__init__() self._num_dims = num_dims self._embed_fns = [lambda x: x] freq_bands: Tensor if log_space: freq_bands = 2.0 ** torch.linspace(0.0, num_freqs - 1, num_freqs) else: freq_bands = torch.linspace(2.0 ** 0.0, 2.0 ** (num_freqs - 1), num_freqs) for freq in freq_bands: self._embed_fns.append(partial(_torch_sin, freq = freq)) self._embed_fns.append(partial(_torch_cos, freq = freq)) self._num_encoded_dims = self._num_dims * len(self._embed_fns)", - "docstring": "Initialize positional encoder. Args: num_dims: Number of input dimensions (channels): int num_freqs: Number of frequency bands for encoding span: int log_space: Whether frequency sampling should be log spaced: bool", - "type": "method", - "file_path": "kornia\\kornia\\nerf\\positional_encoder.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:num_dims type:int arg:num_freqs type:int arg:log_space type:bool Assign Assign If Assign Assign Call call:linspace For Assign" - }, - { - "library": "matplotlib", - "name": "toolmanager_connect", - "source_code": "def toolmanager_connect(self, s, func): return self._callbacks.connect(s, func)", - "docstring": "Connect event with string *s* to *func*. Parameters ---------- s : str The name of the event. The following events are recognized: - 'tool_message_event' - 'tool_removed_event' - 'tool_added_event' For every tool added a new event is created - 'tool_trigger_TOOLNAME', where TOOLNAME is the id of the tool. func : callable Callback function for the toolmanager event with signature:: def func(event: ToolEvent) -> Any Returns ------- cid The callback id for the connection. 
This can be used in .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", - "ast_data": "FunctionDef name:toolmanager_connect arguments arg:self arg:s arg:func Return return:yes" - }, - { - "library": "kornia", - "name": "project", - "source_code": "def project(self, point_3d: Tensor) -> Tensor: P = self.intrinsics @ self.extrinsics return convert_points_from_homogeneous(transform_points(P, point_3d))", - "docstring": "Project a 3d point in world coordinates onto the 2d camera plane. Args: point_3d: tensor containing the 3d points to be projected to the camera plane. The shape of the tensor can be :math:. Returns: tensor of (u, v) cam coordinates with shape :math:. Example: >>> _ = torch.manual_seed(0) >>> X = torch.rand(1, 3) >>> K = torch.eye(4)[None] >>> E = torch.eye(4)[None] >>> h = torch.ones(1) >>> w = torch.ones(1) >>> pinhole = kornia.geometry.camera.PinholeCamera(K, E, h, w) >>> pinhole.project(X) tensor([[5.6088, 8.6827]])", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", - "ast_data": "FunctionDef name:project arguments arg:self arg:point_3d type:Tensor Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "update", - "source_code": "def update(self, modules: Mapping[str, Module]) -> None: if not isinstance(modules, container_abcs.Iterable): raise TypeError('ModuleDict.update should be called with an iterable of key/value pairs, but got ' + type(modules).__name__) if isinstance(modules, (OrderedDict, ModuleDict, container_abcs.Mapping)): for key, module in modules.items(): self[key] = module else: for j, m in enumerate(modules): if not isinstance(m, container_abcs.Iterable): raise TypeError('ModuleDict update sequence element #' + str(j) + ' should be Iterable; is' + type(m).__name__) if not len(m) = = 2: raise ValueError('ModuleDict update sequence element #' + str(j) + ' has length ' + str(len(m)) + '; 2 is required') self[m[0]] = m[1]", - "docstring": "Update the :class: with key-value pairs from a mapping, overwriting existing keys. .. note:: If :attr: is an `~torch.nn.ModuleDict~torch.nn.Module~torch.nn.Module`)", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\container.py", - "ast_data": "FunctionDef name:update arguments arg:self arg:modules type:Mapping[str, Module] If Raise raises:TypeError('ModuleDict.update should be called with an iterable of key/value pairs, but got ' + type(modules).__name__) If Call call:isinstance For Call call:items Assign For Call call:enumerate If Raise raises:TypeError('ModuleDict update sequence element #' + str(j) + ' should be Iterable; is' + type(m).__name__) If Raise raises:ValueError('ModuleDict update sequence element #' + str(j) + ' has length ' + str(len(m)) + '; 2 is required') Assign" - }, - { - "library": "pytorch", - "name": "module_class_name", - "source_code": "@property def module_class_name(self) -> str: if self._module_class is None: return '' if isinstance(self._module_class, type): return self._module_class.__name__ return self._module_class", - "docstring": "Name of the module class. E.g. 
.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", - "ast_data": "FunctionDef name:module_class_name arguments arg:self If Compare op:Is Return return:yes If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "compression_formats", - "source_code": "@cached_property def compression_formats(self): compression_formats = {None: (open, 'rb'), 'gz': (gzip.GzipFile, 'rb'), 'zip': (SingleZipReader, 'r'), 'stdin': (lambda *args: sys.stdin, None)} if has_bz2: compression_formats['bz2'] = (bz2.BZ2File, 'r') if has_lzma: compression_formats['lzma'] = (lzma.LZMAFile, 'r') compression_formats['xz'] = (lzma.LZMAFile, 'r') return compression_formats", - "docstring": "A dict mapping format names to (open function, mode arg) tuples.", - "type": "method", - "file_path": "django\\django\\core\\management\\commands\\loaddata.py", - "ast_data": "FunctionDef name:compression_formats arguments arg:self Assign If Assign If Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "select", - "source_code": "def select(self): if sys.platform = = 'win32': self.dc.SelectObject(self.bitmap) self.IsSelected = True", - "docstring": "Select the current bitmap into this wxDC instance.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", - "ast_data": "FunctionDef name:select arguments arg:self If Compare op:Eq Assign" - }, - { - "library": "tensorflow", - "name": "OperatorNotAllowedInGraphError", - "source_code": "@tf_export('errors.OperatorNotAllowedInGraphError', v1 = []) class OperatorNotAllowedInGraphError(TypeError): pass", - "docstring": "Raised when an unsupported operator is present in Graph execution. For example, using a as a Python inside a Graph will raise . Iterating over values inside a is also not supported in Graph execution. Example: >>> @tf.function ... def iterate_over(t): ... a,b,c = t ... return a >>> >>> iterate_over(tf.constant([1, 2, 3])) Traceback (most recent call last): ... OperatorNotAllowedInGraphError: ...", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", - "ast_data": "ClassDef name:OperatorNotAllowedInGraphError Call call:tf_export" - }, - { - "library": "scikit-learn", - "name": "is_torch_array", - "source_code": "def is_torch_array(x: object) -> TypeIs[torch.Tensor]: cls = cast(Hashable, type(x)) return _issubclass_fast(cls, 'torch', 'Tensor')", - "docstring": "Return True if is a PyTorch tensor. This function does not import PyTorch if it has not already been imported and is therefore cheap to use. See Also -------- array_namespace is_array_api_obj is_numpy_array is_cupy_array is_dask_array is_jax_array is_pydata_sparse_array", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", - "ast_data": "FunctionDef name:is_torch_array arguments arg:x type:object Assign Call call:cast Return return:yes" - }, - { - "library": "pytorch", - "name": "default_restore_location", - "source_code": "def default_restore_location(storage, location): for _, _, fn in _package_registry: result = fn(storage, location) if result is not None: return result raise RuntimeError(\"don't know how to restore data location of \" + torch.typename(storage) + ' (tagged with ' + location + ')')", - "docstring": "Restores using a deserializer function registered for the . This function looks in the registry for deserializer functions that match the . 
If found, it attempts to use them, in priority order, to restore until one returns a not result. If no deserializer can be found in the registry, or all found fail to bear a result, it raises a . Args: storage (STORAGE): the storage object to restore location (str): the location tag associated with the storage object Returns: storage: Optional[STORAGE] Raises: RuntimeError: If no deserializer matching is found in the registry or if all matching ones return .", - "type": "function", - "file_path": "pytorch\\torch\\serialization.py", - "ast_data": "FunctionDef name:default_restore_location arguments arg:storage arg:location For Assign Call call:fn If Compare op:IsNot Return return:yes Raise raises:RuntimeError(\"don't know how to restore data location of \" + torch.typename(storage) + ' (tagged with ' + location + ')')" - }, - { - "library": "pytorch", - "name": "issubtype", - "source_code": "def issubtype(left, right, recursive = True): left = TYPE2ABC.get(left, left) right = TYPE2ABC.get(right, right) if right is Any or left = = right: return True if isinstance(right, _GenericAlias): if getattr(right, '__origin__', None) is Generic: return True if right = = type(None): return False constraints = _decompose_type(right) if len(constraints) = = 0 or Any in constraints: return True if left is Any: return False variants = _decompose_type(left) if len(variants) = = 0: return False return all((_issubtype_with_constraints(variant, constraints, recursive) for variant in variants))", - "docstring": "Check if the left-side type is a subtype of the right-side type. If any of type is a composite type like and with bounds, it would be expanded into a list of types and check all of left-side types are subtypes of either one from right-side types.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\_typing.py", - "ast_data": "FunctionDef name:issubtype arguments arg:left arg:right arg:recursive Assign Call call:get Assign Call call:get If BoolOp Compare op:Is Compare op:Eq Return return:yes If Call call:isinstance If Compare op:Is Return return:yes If Compare op:Eq Return return:yes Assign Call call:_decompose_type If BoolOp Compare op:Eq Compare op:In Return return:yes If Compare op:Is Return return:yes Assign Call call:_decompose_type If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "output", - "source_code": "def output(self, U, T, X0 = None): return lsim(self, U, T, X0 = X0)", - "docstring": "Return the response of a continuous-time system to input . See for details.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:output arguments arg:self arg:U arg:T arg:X0 Return return:yes" - }, - { - "library": "tensorflow", - "name": "inplace_update", - "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_update, which offers the same functionality with well-defined read-write semantics.') def inplace_update(x, i, v): return alias_inplace_update(gen_array_ops.deep_copy(x), i, v)", - "docstring": "Applies an inplace update on input x at index i with value v. Note that this function is not actually inplace - it allocates a copy of x. The utility is not avoiding memory copies but rather specifying a sparse update. If i is None, x and v must be the same shape. Computes y = x; y = v; If i is a scalar, x has a rank 1 higher than v's. Computes y = x; y[i, :] = v; Otherwise, x and v must have the same rank. Computes y = x; y[i, :] = v; Args: x: A Tensor. 
i: None, a scalar or a vector. v: A Tensor. Returns: Returns y, which is guaranteed not to be an alias of x.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py", - "ast_data": "FunctionDef name:inplace_update arguments arg:x arg:i arg:v Call call:deprecated Return return:yes" - }, - { - "library": "django", - "name": "parse_boundary_stream", - "source_code": "def parse_boundary_stream(stream, max_header_size): headers_chunk_size = 1024 while True: if headers_chunk_size > max_header_size: raise MultiPartParserError('Request max total header size exceeded.') chunk = stream.read(headers_chunk_size) header_end = chunk.find(b'\\r\\n\\r\\n') if header_end ! = -1: break stream.unget(chunk) if len(chunk) < headers_chunk_size: return (RAW, {}, stream) headers_chunk_size * = 2 header = chunk[: header_end] stream.unget(chunk[header_end + 4:]) TYPE = RAW outdict = {} for line in header.split(b'\\r\\n'): try: main_value_pair, params = parse_header_parameters(line.decode()) name, value = main_value_pair.split(': ', 1) params = {k: v.encode() for k, v in params.items()} except ValueError: continue if name = = 'content-disposition': TYPE = FIELD if params.get('filename'): TYPE = FILE outdict[name] = (value, params) if TYPE = = RAW: stream.unget(chunk) return (TYPE, outdict, stream)", - "docstring": "Parse one and exactly one stream that encapsulates a boundary.", - "type": "function", - "file_path": "django\\django\\http\\multipartparser.py", - "ast_data": "FunctionDef name:parse_boundary_stream arguments arg:stream arg:max_header_size Assign While If Compare op:Gt Raise raises:MultiPartParserError('Request max total header size exceeded.') Assign Call call:read Assign Call call:find If Compare op:NotEq If Compare op:Lt Return return:yes Assign Assign Assign For Call call:split Try Assign Call call:parse_header_parameters Assign Call call:split Assign ExceptHandler If Compare op:Eq Assign If Call call:get Assign Assign If Compare op:Eq Return return:yes" - }, - { - "library": "numpy", - "name": "__radd__", - "source_code": "def __radd__(self, other): return add(other, self)", - "docstring": "Return (other + self), that is string concatenation, element-wise for a pair of array_likes of or . See Also -------- add", - "type": "method", - "file_path": "numpy\\numpy\\_core\\defchararray.py", - "ast_data": "FunctionDef name:__radd__ arguments arg:self arg:other Return return:yes" - }, - { - "library": "tensorflow", - "name": "gpu_use_nccl_communication", - "source_code": "def gpu_use_nccl_communication() -> bool: return os.environ.get('DTENSOR_GPU_USE_NCCL_COMMUNICATION', '0') ! 
= '0'", - "docstring": "Return True if environment indicates NCCL shall be used for GPU.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\config.py", - "ast_data": "FunctionDef name:gpu_use_nccl_communication arguments Return return:yes" - }, - { - "library": "tensorflow", - "name": "format_as_single_line", - "source_code": "def format_as_single_line(self, prefix = None, divider = ' | ', enabled_item_attrs = None, disabled_item_attrs = None): if enabled_item_attrs is not None and (not isinstance(enabled_item_attrs, list)): enabled_item_attrs = [enabled_item_attrs] if disabled_item_attrs is not None and (not isinstance(disabled_item_attrs, list)): disabled_item_attrs = [disabled_item_attrs] menu_line = prefix if prefix is not None else '' attr_segs = [] for item in self._items: menu_line + = item.caption item_name_begin = len(menu_line) - len(item.caption) if item.is_enabled(): final_attrs = [item] if enabled_item_attrs: final_attrs.extend(enabled_item_attrs) attr_segs.append((item_name_begin, len(menu_line), final_attrs)) elif disabled_item_attrs: attr_segs.append((item_name_begin, len(menu_line), disabled_item_attrs)) menu_line + = divider return RichTextLines(menu_line, font_attr_segs = {0: attr_segs})", - "docstring": "Format the menu as a single-line RichTextLines object. Args: prefix: (str) String added to the beginning of the line. divider: (str) The dividing string between the menu items. enabled_item_attrs: (list or str) Attributes applied to each enabled menu item, e.g., [\"bold\", \"underline\"]. disabled_item_attrs: (list or str) Attributes applied to each disabled menu item, e.g., [\"red\"]. Returns: (RichTextLines) A single-line output representing the menu, with font_attr_segs marking the individual menu items.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:format_as_single_line arguments arg:self arg:prefix arg:divider arg:enabled_item_attrs arg:disabled_item_attrs If BoolOp Compare op:IsNot Assign If BoolOp Compare op:IsNot Assign Assign Assign For Assign If Call call:is_enabled Assign If If Return return:yes" - }, - { - "library": "scipy", - "name": "iterate_hypercube", - "source_code": "def iterate_hypercube(self): if self.disp: logging.info('Constructing and refining simplicial complex graph structure') if self.n is None: self.HC.refine_all() self.n_sampled = self.HC.V.size() else: self.HC.refine(self.n) self.n_sampled + = self.n if self.disp: logging.info('Triangulation completed, evaluating all constraints and objective function values.') if len(self.LMC.xl_maps) > 0: for xl in self.LMC.cache: v = self.HC.V[xl] v_near = v.star() for v in v.nn: v_near = v_near.union(v.nn) self.HC.V.process_pools() if self.disp: logging.info('Evaluations completed.') self.fn = self.HC.V.nfev return", - "docstring": "Iterate a subdivision of the complex Note: called with `` after class initiation", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_shgo.py", - "ast_data": "FunctionDef name:iterate_hypercube arguments arg:self If If Compare op:Is Assign Call call:size If If Compare op:Gt For Assign Assign Call call:star For Assign Call call:union If Assign Return return:no" - }, - { - "library": "tensorflow", - "name": "seek", - "source_code": "@deprecation.deprecated_args(None, 'position is deprecated in favor of the offset argument.', 'position') def seek(self, offset = None, whence = 0, position = None): self._preread_check() if offset is None and 
position is None: raise TypeError('seek(): offset argument required') if offset is not None and position is not None: raise TypeError('seek(): offset and position may not be set simultaneously.') if position is not None: offset = position if whence = = 0: pass elif whence = = 1: offset + = self.tell() elif whence = = 2: offset + = self.size() else: raise errors.InvalidArgumentError(None, None, 'Invalid whence argument: {}. Valid values are 0, 1, or 2.'.format(whence)) self._read_buf.seek(offset)", - "docstring": "Seeks to the offset in the file. Args: offset: The byte count relative to the whence argument. whence: Valid values for whence are: 0: start of the file (default) 1: relative to the current position of the file 2: relative to the end of file. is usually negative.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:seek arguments arg:self arg:offset arg:whence arg:position Call call:deprecated_args If BoolOp Compare op:Is Compare op:Is Raise raises:TypeError('seek(): offset argument required') If BoolOp Compare op:IsNot Compare op:IsNot Raise raises:TypeError('seek(): offset and position may not be set simultaneously.') If Compare op:IsNot Assign If Compare op:Eq If Compare op:Eq If Compare op:Eq Raise raises:errors.InvalidArgumentError(None, None, 'Invalid whence argument: {}. Valid values are 0, 1, or 2.'.format(whence))" - }, - { - "library": "pytorch", - "name": "from_float", - "source_code": "@classmethod def from_float(cls, mod, use_precomputed_fake_quant = False): assert type(mod) = = cls._FLOAT_MODULE, 'qat.' + cls.__name__ + '.from_float only works for ' + cls._FLOAT_MODULE.__name__ assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' assert mod.qconfig, 'Input float module must have a valid qconfig' qconfig = mod.qconfig conv, bn = (mod[0], mod[1]) qat_convbn = cls(conv.in_channels, conv.out_channels, conv.kernel_size, conv.stride, conv.padding, conv.dilation, conv.groups, conv.bias is not None, conv.padding_mode, bn.eps, bn.momentum, False, qconfig) qat_convbn.weight = conv.weight qat_convbn.bias = conv.bias qat_convbn.bn.weight = bn.weight qat_convbn.bn.bias = bn.bias qat_convbn.bn.running_mean = bn.running_mean qat_convbn.bn.running_var = bn.running_var qat_convbn.bn.num_batches_tracked = bn.num_batches_tracked return qat_convbn", - "docstring": "Create a qat module from a float module or qparams_dict Args: a float module, either produced by torch.ao.quantization utilities or directly from user", - "type": "method", - "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\qat\\modules\\conv_fused.py", - "ast_data": "FunctionDef name:from_float arguments arg:cls arg:mod arg:use_precomputed_fake_quant Assign Assign Assign Call call:cls Assign Assign Assign Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "summary_writer", - "source_code": "@property def summary_writer(self): return self._summary_writer", - "docstring": "Return the SummaryWriter used by the chief supervisor. Returns: A SummaryWriter.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", - "ast_data": "FunctionDef name:summary_writer arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "transform_affine", - "source_code": "def transform_affine(self, values): return self.get_affine().transform(values)", - "docstring": "Apply only the affine part of this transformation on the given array of values. 
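The `seek` entry above uses the same whence convention (0 = absolute, 1 = relative to the current position, 2 = relative to the end) as ordinary Python binary file objects; a minimal sketch with a built-in temporary file, not the TensorFlow FileIO class itself, illustrates the three modes::

    import tempfile

    with tempfile.TemporaryFile() as f:   # opened in binary mode, so relative seeks are allowed
        f.write(b"0123456789")
        f.seek(2, 0)                      # whence=0: absolute offset 2
        assert f.read(1) == b"2"          # position is now 3
        f.seek(3, 1)                      # whence=1: 3 bytes past the current position -> 6
        assert f.read(1) == b"6"
        f.seek(-1, 2)                     # whence=2: one byte before the end of file
        assert f.read(1) == b"9"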
`~Transform.input_dims~Transform.input_dims~Transform.output_dims~Transform.output_dims`), depending on the input.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:transform_affine arguments arg:self arg:values Return return:yes" - }, - { - "library": "django", - "name": "column_sql", - "source_code": "def column_sql(self, model, field, include_default = False): field_db_params = field.db_parameters(connection = self.connection) column_db_type = field_db_params['type'] if column_db_type is None: return (None, None) params = [] return (' '.join(self._iter_column_sql(column_db_type, params, model, field, field_db_params, include_default)), params)", - "docstring": "Return the column definition for a field. The field must already have had set_attributes_from_name() called.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\schema.py", - "ast_data": "FunctionDef name:column_sql arguments arg:self arg:model arg:field arg:include_default Assign Call call:db_parameters Assign If Compare op:Is Return return:yes Assign Return return:yes" - }, - { - "library": "numpy", - "name": "compress", - "source_code": "@array_function_dispatch(_compress_dispatcher) def compress(condition, a, axis = None, out = None): return _wrapfunc(a, 'compress', condition, axis = axis, out = out)", - "docstring": "Return selected slices of an array along given axis. When working along a given axis, a slice along that axis is returned in for each index where evaluates to True. When working on a 1-D array, is equivalent to . Parameters ---------- condition : 1-D array of bools Array that selects which entries to return. If len(condition) is less than the size of along the given axis, then output is truncated to the length of the condition array. a : array_like Array from which to extract a part. axis : int, optional Axis along which to take slices. If None (default), work on the flattened array. out : ndarray, optional Output array. Its type is preserved and it must be of the right shape to hold the output. Returns ------- compressed_array : ndarray A copy of without the slices along axis for which is false. See Also -------- take, choose, diag, diagonal, select ndarray.compress : Equivalent method in ndarray extract : Equivalent method when working on 1-D arrays :ref: Examples -------- >>> import numpy as np >>> a = np.array([[1, 2], [3, 4], [5, 6]]) >>> a array([[1, 2], [3, 4], [5, 6]]) >>> np.compress([0, 1], a, axis=0) array([[3, 4]]) >>> np.compress([False, True, True], a, axis=0) array([[3, 4], [5, 6]]) >>> np.compress([False, True], a, axis=1) array([[2], [4], [6]]) Working on the flattened array does not return slices along an axis but selects elements. 
>>> np.compress([False, True], a) array([2])", - "type": "function", - "file_path": "numpy\\numpy\\_core\\fromnumeric.py", - "ast_data": "FunctionDef name:compress arguments arg:condition arg:a arg:axis arg:out Call call:array_function_dispatch Return return:yes" - }, - { - "library": "matplotlib", - "name": "LayoutEngine", - "source_code": "class LayoutEngine: _adjust_compatible = None _colorbar_gridspec = None def __init__(self, **kwargs): super().__init__(**kwargs) self._params = {} def set(self, **kwargs): raise NotImplementedError @property def colorbar_gridspec(self): if self._colorbar_gridspec is None: raise NotImplementedError return self._colorbar_gridspec @property def adjust_compatible(self): if self._adjust_compatible is None: raise NotImplementedError return self._adjust_compatible def get(self): return dict(self._params) def execute(self, fig): raise NotImplementedError", - "docstring": "Base class for Matplotlib layout engines. A layout engine can be passed to a figure at instantiation or at any time with . Once attached to a figure, the layout engine `~.figure.Figure.draw~.figure.Figure.set_layout_engineLayoutEngine.Figure.colorbar.colorbar.make_axes_gridspec.colorbar.make_axes.Figure.subplots_adjustLayoutEngineLayoutEngine.setLayoutEngine.execute` with your implementation", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", - "ast_data": "ClassDef name:LayoutEngine Assign Assign FunctionDef name:__init__ arguments arg:self kwarg:kwargs Assign FunctionDef name:set arguments arg:self kwarg:kwargs Raise raises:NotImplementedError FunctionDef name:colorbar_gridspec arguments arg:self If Compare op:Is Raise raises:NotImplementedError Return return:yes FunctionDef name:adjust_compatible arguments arg:self If Compare op:Is Raise raises:NotImplementedError Return return:yes FunctionDef name:get arguments arg:self Return return:yes FunctionDef name:execute arguments arg:self arg:fig Raise raises:NotImplementedError" - }, - { - "library": "matplotlib", - "name": "set_params", - "source_code": "def set_params(self, base = None, offset = None): if base is not None: self._edge = _Edge_integer(base, 0) if offset is not None: self._offset = offset", - "docstring": "Set parameters within this locator. Parameters ---------- base : float > 0, optional Interval between ticks. offset : float, optional Value added to each multiple of *base*. .. 
versionadded:: 3.8", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:set_params arguments arg:self arg:base arg:offset If Compare op:IsNot Assign Call call:_Edge_integer If Compare op:IsNot Assign" - }, - { - "library": "pytorch", - "name": "create_rdzv_handler", - "source_code": "def create_rdzv_handler(params: RendezvousParameters) -> RendezvousHandler: client = _create_etcd_client(params) etcd_prefix = params.get('etcd_prefix', '/torchelastic/p2p') rdzv = EtcdRendezvous(client = client, prefix = etcd_prefix, run_id = params.run_id, num_min_workers = params.min_nodes, num_max_workers = params.max_nodes, timeout = params.get_as_int('timeout', _DEFAULT_TIMEOUT), last_call_timeout = params.get_as_int('last_call_timeout', _DEFAULT_LAST_CALL_TIMEOUT)) return EtcdRendezvousHandler(rdzv_impl = rdzv, local_addr = params.local_addr)", - "docstring": "Usage: :: rdzv_params = RendezvousParameters( backend=\"etcd\", endpoint=\"192.168.0.42:2379\", run_id=\"123\", min_nodes=4, max_nodes=8, timeout=300, last_call_timeout=30, etcd_prefix=\"custom_prefix\", protocol=\"https\", cacert=\"/etc/kubernetes/certs/ca.crt\", cert=\"/etc/kubernetes/certs/client.crt\", key=\"/etc/kubernetes/certs/client.key\") # -- or -- rdzv_params = RendezvousParameters( backend=\"etcd\", endpoint=\"192.168.0.42:2379\", run_id=\"123\", min_nodes=4, max_nodes=8) etcd_rdzv_handler = create_etcd_rendezvous_handler(rdzv_params) Where: run_id - unique id for this training job instance, min_nodes - min number of workers expected to join the rendezvous, max_nodes - max number of workers allowed to join the rendezvous, defaults to min_workers is not specified. timeout - total timeout within which next_rendezvous is expected to succeed; a RendezvousTimeoutError is raised otherwise; Defaults is 600 (10 minutes). last_call_timeout - additional wait amount (\"last call\") after min number of workers has been reached. Defaults to 30 seconds. etcd_prefix - path prefix (from etcd root), inside which all etcd nodes will be created. Default is \"/torchelastic/p2p\". protocol - http (default) or https to access etcd. cacert - CA cert to access etcd, only makes sense with https. cert - client cert to access etcd, only makes sense with https. key - client key to access etcd, only makes sense with https.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py", - "ast_data": "FunctionDef name:create_rdzv_handler arguments arg:params type:RendezvousParameters Assign Call call:_create_etcd_client Assign Call call:get Assign Call call:EtcdRendezvous Return return:yes" - }, - { - "library": "pandas", - "name": "nunique_ints", - "source_code": "def nunique_ints(values: ArrayLike) -> int: if len(values) = = 0: return 0 values = _ensure_data(values) result = (np.bincount(values.ravel().astype('intp')) ! = 0).sum() return result", - "docstring": "Return the number of unique values for integer array-likes. Significantly faster than pandas.unique for long enough sequences. No checks are done to ensure input is integral. 
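As a hedged sketch of the bincount-based counting that the `nunique_ints` entry above describes (the real function first normalizes its input via `_ensure_data`; plain `np.bincount` additionally assumes non-negative integers)::

    import numpy as np

    def nunique_ints_sketch(values):
        # Each non-empty bin of np.bincount corresponds to one distinct integer value.
        if len(values) == 0:
            return 0
        counts = np.bincount(np.asarray(values).ravel().astype("intp"))
        return int((counts != 0).sum())

    assert nunique_ints_sketch(np.array([3, 1, 3, 7, 1])) == 3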
Parameters ---------- values : 1d array-like Returns ------- int : The number of unique values in ``", - "type": "function", - "file_path": "pandas\\pandas\\core\\algorithms.py", - "ast_data": "FunctionDef name:nunique_ints arguments arg:values type:ArrayLike If Compare op:Eq Return return:yes Assign Call call:_ensure_data Assign Call call:sum Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict", - "source_code": "def predict(self, X): probas = self.predict_proba(X) return self.classes_[np.argmax(probas, axis = 1)].ravel()", - "docstring": "Perform inductive inference across the model. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- y : ndarray of shape (n_samples,) Predictions for input data.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\semi_supervised\\_label_propagation.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X Assign Call call:predict_proba Return return:yes" - }, - { - "library": "django", - "name": "BaseTzLoader", - "source_code": "class BaseTzLoader(TimestamptzLoader): timezone = None def load(self, data): res = super().load(data) return res.replace(tzinfo = self.timezone)", - "docstring": "Load a PostgreSQL timestamptz using the a specific timezone. The timezone can be None too, in which case it will be chopped.", - "type": "class", - "file_path": "django\\django\\db\\backends\\postgresql\\psycopg_any.py", - "ast_data": "ClassDef name:BaseTzLoader Assign FunctionDef name:load arguments arg:self arg:data Assign Call call:load Return return:yes" - }, - { - "library": "django", - "name": "decorator_from_middleware_with_args", - "source_code": "def decorator_from_middleware_with_args(middleware_class): return make_middleware_decorator(middleware_class)", - "docstring": "Like decorator_from_middleware, but return a function that accepts the arguments to be passed to the middleware_class. Use like:: cache_page = decorator_from_middleware_with_args(CacheMiddleware) # ... @cache_page(3600) def my_view(request): # ...", - "type": "function", - "file_path": "django\\django\\utils\\decorators.py", - "ast_data": "FunctionDef name:decorator_from_middleware_with_args arguments arg:middleware_class Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "@doc_controls.do_not_generate_docs def __init__(self, values, row_partition, internal = False): if not internal: raise ValueError('RaggedTensor constructor is private; please use one of the factory methods instead (e.g., RaggedTensor.from_row_lengths())') _assert_is_supported_ragged_values_type(values) if not isinstance(row_partition, RowPartition): raise TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.') values.shape.with_rank_at_least(1) if isinstance(values, RaggedTensor): assert row_partition.dtype = = values._row_partition.dtype self._values = values self._row_partition = row_partition", - "docstring": "Creates a with a specified partitioning for . This constructor is private -- please use one of the following ops to build s: * * * * * * * * Args: values: A potentially ragged tensor of any dtype and shape . row_partition: A object, representing the arrangement of the lists at the top level. internal: True if the constructor is being called by one of the factory methods. If false, an exception will be raised. Raises: ValueError: If internal = False. Note that this method is intended only for internal use. 
TypeError: If values is not a or , or row_partition is not a .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:values arg:row_partition arg:internal If Raise raises:ValueError('RaggedTensor constructor is private; please use one of the factory methods instead (e.g., RaggedTensor.from_row_lengths())') If Raise raises:TypeError(f'Argument `row_partition` must be a RowPartition. Received {row_partition}.') If Call call:isinstance Assign Assign" - }, - { - "library": "pandas", - "name": "nanargmax", - "source_code": "def nanargmax(values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None) -> int | np.ndarray: values, mask = _get_values(values, True, fill_value_typ = '-inf', mask = mask) result = values.argmax(axis) result = _maybe_arg_null_out(result, axis, mask, skipna) return result", - "docstring": "Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- result : int or ndarray[int] The index/indices of max value in specified axis or -1 in the NA case Examples -------- >>> from pandas.core import nanops >>> arr = np.array([1, 2, 3, np.nan, 4]) >>> nanops.nanargmax(arr) np.int64(4) >>> arr = np.array(range(12), dtype=np.float64).reshape(4, 3) >>> arr[2:, 2] = np.nan >>> arr array([[ 0., 1., 2.], [ 3., 4., 5.], [ 6., 7., nan], [ 9., 10., nan]]) >>> nanops.nanargmax(arr, axis=1) array([2, 2, 1, 1])", - "type": "function", - "file_path": "pandas\\pandas\\core\\nanops.py", - "ast_data": "FunctionDef name:nanargmax arguments arg:values type:np.ndarray Assign Call call:_get_values Assign Call call:argmax Assign Call call:_maybe_arg_null_out Return return:yes" - }, - { - "library": "pytorch", - "name": "get_root_mesh_dim", - "source_code": "def get_root_mesh_dim(self, device_mesh: 'DeviceMesh') -> Optional[int]: root_mesh = self.get_root_mesh(device_mesh) child_mesh_dim_names = device_mesh.mesh_dim_names if root_mesh and child_mesh_dim_names: assert len(child_mesh_dim_names) = = 1, 'The submesh can only be a 1D mesh.' child_mesh_dim_name = child_mesh_dim_names[0] return self.get_mesh_dim_by_name(root_mesh, child_mesh_dim_name) return None", - "docstring": "Returns the index of the mesh dim in the root mesh. The device_mesh passed in needs to be sliced out from the root mesh or submesh of the root mesh.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\device_mesh.py", - "ast_data": "FunctionDef name:get_root_mesh_dim arguments arg:self arg:device_mesh type:'DeviceMesh' Assign Call call:get_root_mesh Assign If BoolOp Assign Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "math_to_image", - "source_code": "def math_to_image(s, filename_or_obj, prop = None, dpi = None, format = None, *, color = None): from matplotlib import figure parser = MathTextParser('path') width, height, depth, _, _ = parser.parse(s, dpi = 72, prop = prop) fig = figure.Figure(figsize = (width / 72.0, height / 72.0)) fig.text(0, depth / height, s, fontproperties = prop, color = color) fig.savefig(filename_or_obj, dpi = dpi, format = format) return depth", - "docstring": "Given a math expression, renders it in a closely-clipped bounding box to an image file. Parameters ---------- s : str A math expression. The math portion must be enclosed in dollar signs. 
filename_or_obj : str or path-like or file-like Where to write the image data. prop : , optional The size and style of the text. dpi : float, optional The output dpi. If not set, the dpi is determined as for . format : str, optional The output format, e.g., 'svg', 'pdf', 'ps' or 'png'. If not set, the format is determined as for . color : str, optional Foreground color, defaults to :rc:.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\mathtext.py", - "ast_data": "FunctionDef name:math_to_image arguments arg:s arg:filename_or_obj arg:prop arg:dpi arg:format Assign Call call:MathTextParser Assign Call call:parse Assign Call call:Figure Return return:yes" - }, - { - "library": "tensorflow", - "name": "bias_add", - "source_code": "@tf_export('nn.bias_add') @dispatch.add_dispatch_support def bias_add(value, bias, data_format = None, name = None): with ops.name_scope(name, 'BiasAdd', [value, bias]) as name: if data_format is not None: if data_format.startswith('NC'): data_format = 'NCHW' elif data_format.startswith('N') and data_format.endswith('C'): data_format = 'NHWC' else: raise ValueError(f'`data_format` must be of the form `N...C` or `NC...`. Received: data_format = {data_format}') if not context.executing_eagerly(): value = ops.convert_to_tensor(value, name = 'input') bias = ops.convert_to_tensor(bias, dtype = value.dtype, name = 'bias') return gen_nn_ops.bias_add(value, bias, data_format = data_format, name = name)", - "docstring": "Adds to . This is (mostly) a special case of where is restricted to 1-D. Broadcasting is supported, so may have any number of dimensions. Unlike , the type of is allowed to differ from in the case where both types are quantized. Args: value: A with type , , , , , , , , or . bias: A 1-D with size matching the channel dimension of . Must be the same type as unless is a quantized type, in which case a different quantized type may be used. data_format: A string. 'N...C' and 'NC...' are supported. If (the default) is specified then 'N..C' is assumed. name: A name for the operation (optional). Returns: A with the same type as . Raises: ValueError if data format is unrecognized, if has less than two dimensions when is 'N..C'/ or has less then three dimensions when is , if does not have exactly one dimension (is a vector), or if the size of does not match the size of the channel dimension of .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", - "ast_data": "FunctionDef name:bias_add arguments arg:value arg:bias arg:data_format arg:name Call call:tf_export With If Compare op:IsNot If Call call:startswith Assign If BoolOp Call call:startswith Call call:endswith Assign Raise raises:ValueError(f'`data_format` must be of the form `N...C` or `NC...`. 
Received: data_format={data_format}') If Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Return return:yes" - }, - { - "library": "coconut", - "name": "print_formatted_error", - "source_code": "def print_formatted_error(self, errmsg, warning = False): if errmsg is not None: if self.path is not None: errmsg_lines = ['in ' + self.path + ': '] for line in errmsg.splitlines(): if line: line = ' ' * taberrfmt + line errmsg_lines.append(line) errmsg = '\\n'.join(errmsg_lines) if warning: self.printlog(errmsg) else: self.printerr(errmsg)", - "docstring": "Print a formatted error message in the current context.", - "type": "method", - "file_path": "coconut\\coconut\\terminal.py", - "ast_data": "FunctionDef name:print_formatted_error arguments arg:self arg:errmsg arg:warning If Compare op:IsNot If Compare op:IsNot Assign For Call call:splitlines If Assign Assign Call call:join If" - }, - { - "library": "tensorflow", - "name": "function_scope_id", - "source_code": "@property def function_scope_id(self): return id(self._context_handle)", - "docstring": "Returns an id that is unique to each scope holding functions.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:function_scope_id arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "accepted_type", - "source_code": "def accepted_type(self, media_type): return next((accepted_type for accepted_type in self.accepted_types if accepted_type.match(media_type)), None)", - "docstring": "Return the preferred MediaType instance which matches the given media type.", - "type": "method", - "file_path": "django\\django\\http\\request.py", - "ast_data": "FunctionDef name:accepted_type arguments arg:self arg:media_type Return return:yes" - }, - { - "library": "django", - "name": "get_asgi_application", - "source_code": "def get_asgi_application(): django.setup(set_prefix = False) return ASGIHandler()", - "docstring": "The public interface to Django's ASGI support. Return an ASGI 3 callable. Avoids making django.core.handlers.ASGIHandler a public API, in case the internal implementation changes or moves in the future.", - "type": "function", - "file_path": "django\\django\\core\\asgi.py", - "ast_data": "FunctionDef name:get_asgi_application arguments Return return:yes" - }, - { - "library": "pandas", - "name": "add_object_type_line", - "source_code": "def add_object_type_line(self) -> None: self._lines.append(str(type(self.data)))", - "docstring": "Add line with string representation of dataframe to the table.", - "type": "method", - "file_path": "pandas\\pandas\\io\\formats\\info.py", - "ast_data": "FunctionDef name:add_object_type_line arguments arg:self" - }, - { - "library": "virtualenv", - "name": "run", - "source_code": "@abstractmethod def run(self): raise NotImplementedError", - "docstring": "Discovers an interpreter. :return: the interpreter ready to use for virtual environment creation", - "type": "method", - "file_path": "virtualenv\\src\\virtualenv\\discovery\\discover.py", - "ast_data": "FunctionDef name:run arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "sphinx", - "name": "emit_firstresult", - "source_code": "def emit_firstresult(self, event: str, *args: Any, allowed_exceptions: tuple[type[Exception], ...] = ()) -> Any: return self.events.emit_firstresult(event, *args, allowed_exceptions = allowed_exceptions)", - "docstring": "Emit *event* and pass *arguments* to the callback functions. 
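For the `bias_add` entry above, a small sketch (assuming TensorFlow is installed) shows that with the default `N...C` layout the bias is added along the last (channel) axis, which for this simple case coincides with ordinary broadcasting addition::

    import tensorflow as tf

    value = tf.constant([[1., 2., 3.],
                         [4., 5., 6.]])     # shape [2, 3]; last axis is the channel axis
    bias = tf.constant([10., 20., 30.])     # shape [3], one entry per channel

    out = tf.nn.bias_add(value, bias)       # default data_format treats the last axis as channels
    tf.debugging.assert_near(out, value + bias)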
Return the result of the first callback that doesn't return ``. :param event: The name of event that will be emitted :param args: The arguments for the event :param allowed_exceptions: The list of exceptions that are allowed in the callbacks .. versionadded:: 0.5 .. versionchanged:: 3.1 Added *allowed_exceptions* to specify path-through exceptions", - "type": "method", - "file_path": "sphinx\\sphinx\\application.py", - "ast_data": "FunctionDef name:emit_firstresult arguments arg:self arg:event type:str vararg:args Return return:yes" - }, - { - "library": "numpy", - "name": "combine_paths", - "source_code": "def combine_paths(self, *args): return combine_paths(*args)", - "docstring": "Return a list of existing paths composed by all combinations of items from the arguments.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\system_info.py", - "ast_data": "FunctionDef name:combine_paths arguments arg:self vararg:args Return return:yes" - }, - { - "library": "tensorflow", - "name": "all_gather_with_padding", - "source_code": "def all_gather_with_padding(input_tensor: core.TensorLike, options: Optional[collective_util.Options]) -> core.Tensor: max_length = math_ops.reduce_max(all_lengths) padded_tensor = _pad_util(input_tensor, max_length) all_padded_tensors = self._all_gather(padded_tensor, options) split_tensors = [] for i in range(self._group_size): start_pos = i * max_length split_tensors.append(all_padded_tensors[start_pos: start_pos + all_lengths[i]]) return array_ops.concat(split_tensors, 0)", - "docstring": "all_gather tensors of different sizes using padding.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py", - "ast_data": "FunctionDef name:all_gather_with_padding arguments arg:input_tensor type:core.TensorLike arg:options type:Optional[collective_util.Options] Assign Call call:reduce_max Assign Call call:_pad_util Assign Call call:_all_gather Assign For Call call:range Assign Return return:yes" - }, - { - "library": "django", - "name": "pprint", - "source_code": "@register.filter(is_safe = True) def pprint(value): try: return pformat(value) except Exception as e: return 'Error in formatting: %s: %s' % (e.__class__.__name__, e)", - "docstring": "A wrapper around pprint.pprint -- for debugging, really.", - "type": "function", - "file_path": "django\\django\\template\\defaultfilters.py", - "ast_data": "FunctionDef name:pprint arguments arg:value Call call:filter Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "matplotlib", - "name": "contourf", - "source_code": "@_preprocess_data() def contourf(self, X, Y, Z, *args, zdir = 'z', offset = None, axlim_clip = False, **kwargs): had_data = self.has_data() jX, jY, jZ = art3d.rotate_axes(X, Y, Z, zdir) cset = super().contourf(jX, jY, jZ, *args, **kwargs) levels = self._add_contourf_set(cset, zdir, offset, axlim_clip) self._auto_scale_contourf(X, Y, Z, zdir, levels, had_data) return cset", - "docstring": "Create a 3D filled contour plot. Parameters ---------- X, Y, Z : array-like Input data. See for supported data shapes. zdir : {'x', 'y', 'z'}, default: 'z' The direction to use. offset : float, optional If specified, plot a projection of the contour lines at this position in a plane normal to *zdir*. axlim_clip : bool, default: False Whether to hide lines with a vertex outside the axes view limits. .. versionadded:: 3.10 data : indexable object, optional DATA_PARAMETER_PLACEHOLDER *args, **kwargs Other arguments are forwarded to . 
Returns ------- matplotlib.contour.QuadContourSet", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", - "ast_data": "FunctionDef name:contourf arguments arg:self arg:X arg:Y arg:Z vararg:args kwarg:kwargs Call call:_preprocess_data Assign Call call:has_data Assign Call call:rotate_axes Assign Call call:contourf Assign Call call:_add_contourf_set Return return:yes" - }, - { - "library": "pytorch", - "name": "is_lifted_tensor_constant", - "source_code": "def is_lifted_tensor_constant(program: 'ExportedProgram', node: torch.fx.Node) -> bool: return node.name in program.graph_signature.inputs_to_lifted_tensor_constants", - "docstring": "Checks if the given node is a lifted tensor constant within the exported program", - "type": "function", - "file_path": "pytorch\\torch\\_export\\utils.py", - "ast_data": "FunctionDef name:is_lifted_tensor_constant arguments arg:program type:'ExportedProgram' arg:node type:torch.fx.Node Return return:yes" - }, - { - "library": "pytorch", - "name": "poisson_nll_loss", - "source_code": "@elementwise_type_promotion_wrapper(type_promoting_args = ('input', 'target'), type_promotion_kind = ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT) def poisson_nll_loss(input: TensorLikeType, target: TensorLikeType, log_input: bool = True, full: bool = False, size_average: Optional[bool] = None, eps: float = 1e-08, reduce: Optional[bool] = None, reduction: str = 'mean') -> TensorLikeType: if size_average is not None or reduce is not None: reduction = _get_string_reduction_arg(size_average = size_average, reduce = reduce) _check_reduction_value(reduction) if log_input: loss = torch.exp(input) - target * input else: loss = input - target * torch.log(input + eps) if full: stirling_term = target * torch.log(target) - target + 0.5 * torch.log(2 * torch.pi * target) loss = loss + stirling_term.masked_fill(target < = 1, 0) return _apply_loss_reduction(loss, reduction)", - "docstring": "Reference implementation of torch.nn.functional.poisson_nll_loss", - "type": "function", - "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", - "ast_data": "FunctionDef name:poisson_nll_loss arguments arg:input type:TensorLikeType arg:target type:TensorLikeType arg:log_input type:bool arg:full type:bool arg:size_average type:Optional[bool] arg:eps type:float arg:reduce type:Optional[bool] arg:reduction type:str Call call:elementwise_type_promotion_wrapper If BoolOp Compare op:IsNot Compare op:IsNot Assign Call call:_get_string_reduction_arg If Assign Assign If Assign Assign Return return:yes" - }, - { - "library": "scipy", - "name": "toeplitz", - "source_code": "def toeplitz(c, r = None): c = np.asarray(c) if r is None: r = c.conjugate() else: r = np.asarray(r) if c.ndim > 1 or r.ndim > 1: msg = 'Beginning in SciPy 1.17, multidimensional input will be treated as a batch, not `ravel`ed. To preserve the existing behavior and silence this warning, `ravel` arguments before passing them to `toeplitz`.' warnings.warn(msg, FutureWarning, stacklevel = 2) c, r = (c.ravel(), r.ravel()) vals = np.concatenate((c[: : -1], r[1:])) out_shp = (len(c), len(r)) n = vals.strides[0] return as_strided(vals[len(c) - 1:], shape = out_shp, strides = (-n, n)).copy()", - "docstring": "Construct a Toeplitz matrix. The Toeplitz matrix has constant diagonals, with c as its first column and r as its first row. If r is not given, `toeplitzcrcr` is None, was changed in version 0.8.0. The behavior in previous versions was undocumented and is no longer supported. 
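The `poisson_nll_loss` reference entry above spells out the `log_input=True`, `full=False` case as `exp(input) - target * input` followed by the chosen reduction; a small check against the public `torch.nn.functional` entry point (a sketch, assuming default arguments otherwise)::

    import torch
    import torch.nn.functional as F

    inp = torch.tensor([0.5, -1.0, 2.0])      # log-rates, since log_input=True
    target = torch.tensor([1.0, 0.0, 3.0])

    manual = (torch.exp(inp) - target * inp).mean()
    library = F.poisson_nll_loss(inp, target, log_input=True, full=False, reduction="mean")
    assert torch.allclose(manual, library)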
Examples -------- >>> from scipy.linalg import toeplitz >>> toeplitz([1,2,3], [1,4,5,6]) array([[1, 4, 5, 6], [2, 1, 4, 5], [3, 2, 1, 4]]) >>> toeplitz([1.0, 2+3j, 4-1j]) array([[ 1.+0.j, 2.-3.j, 4.+1.j], [ 2.+3.j, 1.+0.j, 2.-3.j], [ 4.-1.j, 2.+3.j, 1.+0.j]])", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_special_matrices.py", - "ast_data": "FunctionDef name:toeplitz arguments arg:c arg:r Assign Call call:asarray If Compare op:Is Assign Call call:conjugate Assign Call call:asarray If BoolOp Compare op:Gt Compare op:Gt Assign Assign Assign Call call:concatenate Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "summary_writer_initializer_op", - "source_code": "def summary_writer_initializer_op(): if context.executing_eagerly(): raise RuntimeError('tf.contrib.summary.summary_writer_initializer_op is only supported in graph mode.') return ops.get_collection(_SUMMARY_WRITER_INIT_COLLECTION_NAME)", - "docstring": "Graph-mode only. Returns the list of ops to create all summary writers. Returns: The initializer ops. Raises: RuntimeError: If in Eager mode.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", - "ast_data": "FunctionDef name:summary_writer_initializer_op arguments If Call call:executing_eagerly Raise raises:RuntimeError('tf.contrib.summary.summary_writer_initializer_op is only supported in graph mode.') Return return:yes" - }, - { - "library": "pytorch", - "name": "as_python_constant", - "source_code": "def as_python_constant(self): return self.__variable.as_python_constant()", - "docstring": "Returns the Python value this variable would have, but only if it is completely known at compile-time (e.g., it is constant). WARNING: Do NOT mutate the returned constant. The returned constant may or may not correspond to the actual value this variable may take on at runtime; for example, if the variable in question is a constant list, we may return a copy of that list.", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\comptime.py", - "ast_data": "FunctionDef name:as_python_constant arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "eliminate_zeros", - "source_code": "def eliminate_zeros(self): mask = self.data ! = 0 self.data = self.data[mask] self.coords = tuple((idx[mask] for idx in self.coords))", - "docstring": "Remove zero entries from the array/matrix This is an *in place* operation", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_coo.py", - "ast_data": "FunctionDef name:eliminate_zeros arguments arg:self Assign Compare op:NotEq Assign Assign Call call:tuple" - }, - { - "library": "scrapy", - "name": "string_camelcase", - "source_code": "def string_camelcase(string: str) -> str: return CAMELCASE_INVALID_CHARS.sub('', string.title())", - "docstring": "Convert a word to its CamelCase version and remove invalid chars >>> string_camelcase('lost-pound') 'LostPound' >>> string_camelcase('missing_images') 'MissingImages'", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\template.py", - "ast_data": "FunctionDef name:string_camelcase arguments arg:string type:str Return return:yes" - }, - { - "library": "tensorflow", - "name": "TextFileIndex", - "source_code": "@tf_export('lookup.TextFileIndex') class TextFileIndex: WHOLE_LINE = -2 LINE_NUMBER = -1", - "docstring": "The key and value content to get from each line. This class defines the key and value used for . 
The key and value content to get from each line is specified either by the following, or a value . * means use the line number starting from zero, expects data type int64. * means use the whole line content, expects data type string. A value means use the index (starting at zero) of the split line based on .", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", - "ast_data": "ClassDef name:TextFileIndex Call call:tf_export Assign Assign" - }, - { - "library": "numpy", - "name": "convolve", - "source_code": "def convolve(a, v, mode = 'full', propagate_mask = True): return _convolve_or_correlate(np.convolve, a, v, mode, propagate_mask)", - "docstring": "Returns the discrete, linear convolution of two one-dimensional sequences. Parameters ---------- a, v : array_like Input sequences. mode : {'valid', 'same', 'full'}, optional Refer to the docstring. propagate_mask : bool If True, then if any masked element is included in the sum for a result element, then the result is masked. If False, then the result element is only masked if no non-masked cells contribute towards it Returns ------- out : MaskedArray Discrete, linear convolution of and . See Also -------- numpy.convolve : Equivalent function in the top-level NumPy module.", - "type": "function", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:convolve arguments arg:a arg:v arg:mode arg:propagate_mask Return return:yes" - }, - { - "library": "pytorch", - "name": "copy", - "source_code": "def copy(self) -> ParameterDict: return ParameterDict(OrderedDict(((k, self[k]) for k in self._keys)))", - "docstring": "Return a copy of this :class: instance.", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\container.py", - "ast_data": "FunctionDef name:copy arguments arg:self Return return:yes" - }, - { - "library": "salmon", - "name": "ConfirmationStorage", - "source_code": "class ConfirmationStorage: def __init__(self, db = {}): self.confirmations = db def clear(self): self.confirmations.clear() def key(self, target, from_address): key = target + ': ' + from_address return key.encode('ascii') def get(self, target, from_address): return self.confirmations.get(self.key(target, from_address), (None, None)) def delete(self, target, from_address): try: del self.confirmations[self.key(target, from_address)] except KeyError: pass def store(self, target, from_address, expected_secret, pending_message_id): self.confirmations[self.key(target, from_address)] = (expected_secret, pending_message_id)", - "docstring": "This is the basic confirmation storage. For simple testing purposes you can just use the default hash db parameter. If you do a deployment you can probably get away with a shelf hash instead. You can write your own version of this and use it. 
The confirmation engine only cares that it gets something that supports all of these methods.", - "type": "class", - "file_path": "salmon\\salmon\\confirm.py", - "ast_data": "ClassDef name:ConfirmationStorage FunctionDef name:__init__ arguments arg:self arg:db Assign FunctionDef name:clear arguments arg:self FunctionDef name:key arguments arg:self arg:target arg:from_address Assign Return return:yes FunctionDef name:get arguments arg:self arg:target arg:from_address Return return:yes FunctionDef name:delete arguments arg:self arg:target arg:from_address Try ExceptHandler FunctionDef name:store arguments arg:self arg:target arg:from_address arg:expected_secret arg:pending_message_id Assign" - }, - { - "library": "scipy", - "name": "trim_mean", - "source_code": "def trim_mean(a, proportiontocut, axis = 0): a = np.asarray(a) if a.size = = 0: return np.nan if axis is None: a = a.ravel() axis = 0 nobs = a.shape[axis] lowercut = int(proportiontocut * nobs) uppercut = nobs - lowercut if lowercut > uppercut: raise ValueError('Proportion too big.') atmp = np.partition(a, (lowercut, uppercut - 1), axis) sl = [slice(None)] * atmp.ndim sl[axis] = slice(lowercut, uppercut) return np.mean(atmp[tuple(sl)], axis = axis)", - "docstring": "Return mean of array after trimming a specified fraction of extreme values Removes the specified proportion of elements from *each* end of the sorted array, then computes the mean of the remaining elements. Parameters ---------- a : array_like Input array. proportiontocut : float Fraction of the most positive and most negative elements to remove. When the specified proportion does not result in an integer number of elements, the number of elements to trim is rounded down. axis : int or None, default: 0 Axis along which the trimmed means are computed. If None, compute over the raveled array. Returns ------- trim_mean : ndarray Mean of trimmed array. See Also -------- trimboth : Remove a proportion of elements from each end of an array. tmean : Compute the mean after trimming values outside specified limits. Notes ----- For 1-D array , is approximately equivalent to the following calculation:: import numpy as np a = np.sort(a) m = int(proportiontocut * len(a)) np.mean(a[m: len(a) - m]) Examples -------- >>> import numpy as np >>> from scipy import stats >>> x = [1, 2, 3, 5] >>> stats.trim_mean(x, 0.25) 2.5 When the specified proportion does not result in an integer number of elements, the number of elements to trim is rounded down. >>> stats.trim_mean(x, 0.24999) == np.mean(x) True Use to specify the axis along which the calculation is performed. >>> x2 = [[1, 2, 3, 5], ... [10, 20, 30, 50]] >>> stats.trim_mean(x2, 0.25) array([ 5.5, 11. , 16.5, 27.5]) >>> stats.trim_mean(x2, 0.25, axis=1) array([ 2.5, 25. 
])", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_stats_py.py", - "ast_data": "FunctionDef name:trim_mean arguments arg:a arg:proportiontocut arg:axis Assign Call call:asarray If Compare op:Eq Return return:yes If Compare op:Is Assign Call call:ravel Assign Assign Assign Call call:int Assign If Compare op:Gt Raise raises:ValueError('Proportion too big.') Assign Call call:partition Assign Assign Call call:slice Return return:yes" - }, - { - "library": "scrapy", - "name": "url_has_any_extension", - "source_code": "def url_has_any_extension(url: UrlT, extensions: Iterable[str]) -> bool: lowercase_path = _parse_url(url).path.lower() return any((lowercase_path.endswith(ext) for ext in extensions))", - "docstring": "Return True if the url ends with one of the extensions provided", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\url.py", - "ast_data": "FunctionDef name:url_has_any_extension arguments arg:url type:UrlT arg:extensions type:Iterable[str] Assign Call call:lower Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, name: str, input_nodes: list[Buffer], layout: Layout, input_reorder: Optional[list[int]] = None) -> None: super().__init__(name) self.input_nodes = input_nodes self.output_node: Buffer = Buffer(name = 'buf_out', layout = layout) self.input_reorder = input_reorder self.layout = layout", - "docstring": "Baseclass for ROCm C++ Templates, derived from KernelTemplate. Not to be instantiated directly. Args: name (str): The name of the ROCmTemplate object. input_nodes (List[IRNode]): A list of input IRNodes. layout (Layout): The layout of the output buffer / tensor. input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_template.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:name type:str arg:input_nodes type:list[Buffer] arg:layout type:Layout arg:input_reorder type:Optional[list[int]] Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "mem_efficient_sdp_enabled", - "source_code": "def mem_efficient_sdp_enabled(): return torch._C._get_mem_efficient_sdp_enabled()", - "docstring": ".. warning:: This flag is beta and subject to change. Returns whether memory efficient scaled dot product attention is enabled or not.", - "type": "function", - "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py", - "ast_data": "FunctionDef name:mem_efficient_sdp_enabled arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "LowerCholeskyTransform", - "source_code": "class LowerCholeskyTransform(Transform): domain = constraints.independent(constraints.real, 2) codomain = constraints.lower_cholesky def __eq__(self, other): return isinstance(other, LowerCholeskyTransform) def _call(self, x): return x.tril(-1) + x.diagonal(dim1 = -2, dim2 = -1).exp().diag_embed() def _inverse(self, y): return y.tril(-1) + y.diagonal(dim1 = -2, dim2 = -1).log().diag_embed()", - "docstring": "Transform from unconstrained matrices to lower-triangular matrices with nonnegative diagonal entries. 
This is useful for parameterizing positive definite matrices in terms of their Cholesky factorization.", - "type": "class", - "file_path": "pytorch\\torch\\distributions\\transforms.py", - "ast_data": "ClassDef name:LowerCholeskyTransform Assign Call call:independent Assign FunctionDef name:__eq__ arguments arg:self arg:other Return return:yes FunctionDef name:_call arguments arg:self arg:x Return return:yes FunctionDef name:_inverse arguments arg:self arg:y Return return:yes" - }, - { - "library": "numpy", - "name": "as_symbol", - "source_code": "def as_symbol(obj): return Expr(Op.SYMBOL, obj)", - "docstring": "Return object as SYMBOL expression (variable or unparsed expression).", - "type": "function", - "file_path": "numpy\\numpy\\f2py\\symbolic.py", - "ast_data": "FunctionDef name:as_symbol arguments arg:obj Return return:yes" - }, - { - "library": "tensorflow", - "name": "regex_find", - "source_code": "def regex_find(orig_screen_output, regex, font_attr): new_screen_output = RichTextLines(orig_screen_output.lines, font_attr_segs = copy.deepcopy(orig_screen_output.font_attr_segs), annotations = orig_screen_output.annotations) try: re_prog = re.compile(regex) except re.error: raise ValueError('Invalid regular expression: \"%s\"' % regex) regex_match_lines = [] for i, line in enumerate(new_screen_output.lines): find_it = re_prog.finditer(line) match_segs = [] for match in find_it: match_segs.append((match.start(), match.end(), font_attr)) if match_segs: if i not in new_screen_output.font_attr_segs: new_screen_output.font_attr_segs[i] = match_segs else: new_screen_output.font_attr_segs[i].extend(match_segs) new_screen_output.font_attr_segs[i] = sorted(new_screen_output.font_attr_segs[i], key = lambda x: x[0]) regex_match_lines.append(i) new_screen_output.annotations[REGEX_MATCH_LINES_KEY] = regex_match_lines return new_screen_output", - "docstring": "Perform regex match in rich text lines. Produces a new RichTextLines object with font_attr_segs containing highlighted regex matches. Example use cases include: 1) search for specific items in a large list of items, and 2) search for specific numerical values in a large tensor. Args: orig_screen_output: The original RichTextLines, in which the regex find is to be performed. regex: The regex used for matching. font_attr: Font attribute used for highlighting the found result. Returns: A modified copy of orig_screen_output. 
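To illustrate the `_call` mapping shown in the `LowerCholeskyTransform` entry above (strictly lower triangle kept, diagonal exponentiated), a short sketch using the public transform class::

    import torch
    from torch.distributions.transforms import LowerCholeskyTransform

    t = LowerCholeskyTransform()
    x = torch.randn(3, 3)
    y = t(x)

    assert torch.equal(y, y.tril())                        # lower triangular
    assert bool((y.diagonal(dim1=-2, dim2=-1) > 0).all())  # strictly positive diagonal
    assert torch.allclose(y, x.tril(-1) + x.diagonal(dim1=-2, dim2=-1).exp().diag_embed())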
Raises: ValueError: If input str regex is not a valid regular expression.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:regex_find arguments arg:orig_screen_output arg:regex arg:font_attr Assign Call call:RichTextLines Try Assign Call call:compile ExceptHandler Raise raises:ValueError('Invalid regular expression: \"%s\"' % regex) Assign For Call call:enumerate Assign Call call:finditer Assign For If If Compare op:NotIn Assign Assign Call call:sorted Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "TorchVersion", - "source_code": "class TorchVersion(str): __slots__ = () def _convert_to_version(self, inp: Any) -> Any: if isinstance(inp, Version): return inp elif isinstance(inp, str): return Version(inp) elif isinstance(inp, Iterable): return Version('.'.join((str(item) for item in inp))) else: raise InvalidVersion(inp) def _cmp_wrapper(self, cmp: Any, method: str) -> bool: try: return getattr(Version(self), method)(self._convert_to_version(cmp)) except BaseException as e: if not isinstance(e, InvalidVersion): raise return getattr(super(), method)(cmp)", - "docstring": "A string with magic powers to compare to both Version and iterables! Prior to 1.10.0 torch.__version__ was stored as a str and so many did comparisons against torch.__version__ as if it were a str. In order to not break them we have TorchVersion which masquerades as a str while also having the ability to compare against both packaging.version.Version as well as tuples of values, eg. (1, 2, 1) Examples: Comparing a TorchVersion object to a Version object TorchVersion('1.10.0a') > Version('1.10.0a') Comparing a TorchVersion object to a Tuple object TorchVersion('1.10.0a') > (1, 2) # 1.2 TorchVersion('1.10.0a') > (1, 2, 1) # 1.2.1 Comparing a TorchVersion object against a string TorchVersion('1.10.0a') > '1.2' TorchVersion('1.10.0a') > '1.2.1'", - "type": "class", - "file_path": "pytorch\\torch\\torch_version.py", - "ast_data": "ClassDef name:TorchVersion Assign FunctionDef name:_convert_to_version arguments arg:self arg:inp type:Any If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Raise raises:InvalidVersion(inp) FunctionDef name:_cmp_wrapper arguments arg:self arg:cmp type:Any arg:method type:str Try Return return:yes ExceptHandler If Raise Return return:yes" - }, - { - "library": "feincms", - "name": "get_context_dict", - "source_code": "def get_context_dict(self): return {'content': self, 'portal': 'unknown'}", - "docstring": "Extend this if you need more variables passed to template", - "type": "method", - "file_path": "feincms\\feincms\\content\\video\\models.py", - "ast_data": "FunctionDef name:get_context_dict arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_sparsity_modes", - "source_code": "def get_sparsity_modes(model_object): if not model_object or not model_object.metadata: return [] result = set() for subgraph in model_object.subgraphs: for tensor in subgraph.tensors: if not tensor.sparsity: continue if tensor.sparsity.blockMap.size = = 0 or not tensor.sparsity.blockMap: result.add(conversion_metadata_fb.ModelOptimizationMode.RANDOM_SPARSITY) else: result.add(conversion_metadata_fb.ModelOptimizationMode.BLOCK_SPARSITY) return list(result)", - "docstring": "Get sparsity modes used in a tflite model. The sparsity modes are listed in conversion_metadata.fbs file. 
Args: model_object: A tflite model in object form. Returns: The list of sparsity modes used in the model.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", - "ast_data": "FunctionDef name:get_sparsity_modes arguments arg:model_object If BoolOp Return return:yes Assign Call call:set For For If If BoolOp Compare op:Eq Return return:yes" - }, - { - "library": "kornia", - "name": "show", - "source_code": "def show(self, n_row: Optional[int] = None, backend: str = 'pil', display: bool = True) -> Optional[Any]: if self._output_image is None: raise ValueError('No pre-computed images found. Needs to execute first.') if len(self._output_image.shape) = = 3: out_image = self._output_image elif len(self._output_image.shape) = = 4: from kornia.utils.image import make_grid if n_row is None: n_row = math.ceil(self._output_image.shape[0] ** 0.5) out_image = make_grid(self._output_image, n_row, padding = 2) else: raise ValueError if backend = = 'pil' and display: Image.fromarray((out_image.permute(1, 2, 0).squeeze().numpy() * 255).astype(np.uint8)).show() return None if backend = = 'pil': return Image.fromarray((out_image.permute(1, 2, 0).squeeze().numpy() * 255).astype(np.uint8)) raise ValueError(f'Unsupported backend `{backend}`.')", - "docstring": "Return PIL images. Args: n_row: Number of images displayed in each row of the grid. backend: visualization backend. Only PIL is supported now. display: Whether or not to show the image.", - "type": "method", - "file_path": "kornia\\kornia\\core\\mixin\\image_module.py", - "ast_data": "FunctionDef name:show arguments arg:self arg:n_row type:Optional[int] arg:backend type:str arg:display type:bool If Compare op:Is Raise raises:ValueError('No pre-computed images found. Needs to execute first.') If Compare op:Eq Assign If Compare op:Eq If Compare op:Is Assign Call call:ceil Assign Call call:make_grid Raise raises:ValueError If BoolOp Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Raise raises:ValueError(f'Unsupported backend `{backend}`.')" - }, - { - "library": "pytorch", - "name": "FCN", - "source_code": "class FCN(_SimpleSegmentationModel): pass", - "docstring": "Implements a Fully-Convolutional Network for semantic segmentation. Args: backbone (nn.Module): the network used to compute the features for the model. The backbone should return an OrderedDict[Tensor], with the key being \"out\" for the last feature map used, and \"aux\" if an auxiliary classifier is used. classifier (nn.Module): module that takes the \"out\" element returned from the backbone and returns a dense prediction. aux_classifier (nn.Module, optional): auxiliary classifier used during training", - "type": "class", - "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py", - "ast_data": "ClassDef name:FCN" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, wrapped_list): self._non_append_mutation_value = False self._external_modification_value = False super().__init__(wrapped_list) self._last_wrapped_list_snapshot = list(self._storage)", - "docstring": "Construct a new list wrapper. Args: wrapped_list: The initial value of the data structure. A shallow copy may be maintained for error checking. 
itself should not be modified directly after constructing the , and if changes are detected the will throw an exception on save.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\trackable\\data_structures.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:wrapped_list Assign Assign Assign Call call:list" - }, - { - "library": "django", - "name": "convert_extent", - "source_code": "def convert_extent(self, box): if box is None: return None shell = GEOSGeometry(box).shell xmin, ymin = shell[0][: 2] xmax, ymax = shell[2][: 2] return (xmin, ymin, xmax, ymax)", - "docstring": "Convert the polygon data received from SpatiaLite to min/max values.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\db\\backends\\spatialite\\operations.py", - "ast_data": "FunctionDef name:convert_extent arguments arg:self arg:box If Compare op:Is Return return:yes Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "def __call__(self, shape, dtype = None, **kwargs): del kwargs return constant_op.constant(self.value, dtype = _get_dtype(dtype), shape = shape)", - "docstring": "Returns a tensor object initialized to . Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not specified, is used, which default to unless you configured it otherwise (via ). **kwargs: Additional keyword arguments.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:shape arg:dtype kwarg:kwargs Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_antialiased", - "source_code": "def get_antialiased(self): return self._antialiased", - "docstring": "Return whether the object should try to do antialiased rendering.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:get_antialiased arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "__init__", - "source_code": "def __init__(self): super().__init__() self.Vertex = VertexCube", - "docstring": "Class for a vertex cache for a simplicial complex without an associated field. Useful only for building and visualising a domain complex. Parameters ----------", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self Assign" - }, - { - "library": "mongo", - "name": "inserted_ids", - "source_code": "@property def inserted_ids(self) -> list[Any]: return self.__inserted_ids", - "docstring": "A list of _ids of the inserted documents, in the order provided. .. note:: If `ordered~pymongo.collection.Collection.insert_many` the server may have inserted the documents in a different order than what is presented here.", - "type": "method", - "file_path": "mongo\\pymongo\\results.py", - "ast_data": "FunctionDef name:inserted_ids arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "is_unsafe_leaf", - "source_code": "def is_unsafe_leaf(self, row, predicted_config, choice2time): return False", - "docstring": "Can be overridden by subclasses to define their own logic for deciding when a leaf is unsafe. Returns a sample that landed in the leaf, the choice predicted by the tree, and a dictionary that maps each choice to the execution time. 
One can for example decide to mark a leaf as unsafe if the predicted choice is 2x slower than the fastest choice. If a leaf is unsafe, the learned heuristic will always return 'unsure' if an input lands in that leaf.", - "type": "method", - "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", - "ast_data": "FunctionDef name:is_unsafe_leaf arguments arg:self arg:row arg:predicted_config arg:choice2time Return return:yes" - }, - { - "library": "tensorflow", - "name": "dynamic", - "source_code": "@property def dynamic(self): return any((layer._dynamic for layer in self._flatten_layers()))", - "docstring": "Whether the layer is dynamic (eager-only); set in the constructor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:dynamic arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "strict_fusion", - "source_code": "class strict_fusion: def __init__(self) -> None: if not torch._jit_internal.is_scripting(): warnings.warn('Only works in script mode') def __enter__(self): pass def __exit__(self, type: Any, value: Any, tb: Any) -> None: pass", - "docstring": "Give errors if not all nodes have been fused in inference, or symbolically differentiated in training. Example: Forcing fusion of additions. .. code-block:: python @torch.jit.script def foo(x): with torch.jit.strict_fusion(): return x + x + x", - "type": "class", - "file_path": "pytorch\\torch\\jit\\__init__.py", - "ast_data": "ClassDef name:strict_fusion FunctionDef name:__init__ arguments arg:self If FunctionDef name:__enter__ arguments arg:self FunctionDef name:__exit__ arguments arg:self arg:type type:Any arg:value type:Any arg:tb type:Any" - }, - { - "library": "tensorflow", - "name": "set_device_policy", - "source_code": "@tf_export('config.experimental.set_device_policy') def set_device_policy(device_policy): if device_policy = = 'silent': context.context().device_policy = context.DEVICE_PLACEMENT_SILENT elif device_policy = = 'silent_for_int32': context.context().device_policy = context.DEVICE_PLACEMENT_SILENT_FOR_INT32 elif device_policy = = 'warn': context.context().device_policy = context.DEVICE_PLACEMENT_WARN elif device_policy = = 'explicit': context.context().device_policy = context.DEVICE_PLACEMENT_EXPLICIT elif device_policy is None: context.context().device_policy = None else: raise ValueError(f'Invalid argument `device_policy`: {device_policy!r}. Please refer to https: //www.tensorflow.org/api_docs/python/tf/config/experimental/set_device_policy for valid `device_policy` arguments.')", - "docstring": "Sets the current thread device policy. The device policy controls how operations requiring inputs on a specific device (e.g., on GPU:0) handle inputs on a different device (e.g. GPU:1). When using the default, an appropriate policy will be picked automatically. The default policy may change over time. This function only sets the device policy for the current thread. Any subsequently started thread will again use the default policy. Args: device_policy: A device policy. Valid values: - None: Switch to a system default. - 'warn': Copies the tensors which are not on the right device and logs a warning. - 'explicit': Raises an error if the placement is not as required. - 'silent': Silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. 
- 'silent_for_int32': silently copies tensors, raising errors on the other ones. Raises: ValueError: If an invalid is passed.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", - "ast_data": "FunctionDef name:set_device_policy arguments arg:device_policy Call call:tf_export If Compare op:Eq Assign If Compare op:Eq Assign If Compare op:Eq Assign If Compare op:Eq Assign If Compare op:Is Assign Raise raises:ValueError(f'Invalid argument `device_policy`: {device_policy!r}. Please refer to https://www.tensorflow.org/api_docs/python/tf/config/experimental/set_device_policy for valid `device_policy` arguments.')" - }, - { - "library": "tensorflow", - "name": "scatter_mul", - "source_code": "def scatter_mul(self, sparse_delta, use_locking = False, name = None): raise NotImplementedError", - "docstring": "Multiply this variable by . Args: sparse_delta: to multiply this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. Raises: TypeError: if is not an .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", - "ast_data": "FunctionDef name:scatter_mul arguments arg:self arg:sparse_delta arg:use_locking arg:name Raise raises:NotImplementedError" - }, - { - "library": "scipy", - "name": "cspline2d", - "source_code": "def cspline2d(signal, lamb = 0.0, precision = -1.0): xp = array_namespace(signal) signal = np.asarray(signal) if precision < 0.0 or precision > = 1.0: if signal.dtype in [np.float32, np.complex64]: precision = 0.001 else: precision = 1e-06 if lamb < = 1 / 144.0: r = -2 + math.sqrt(3.0) out = symiirorder_nd(symiirorder1, signal, -r * 6.0, r, precision = precision, axis = -1) out = symiirorder_nd(symiirorder1, out, -r * 6.0, r, precision = precision, axis = 0) return out r, omega = compute_root_from_lambda(lamb) out = symiirorder_nd(symiirorder2, signal, r, omega, precision = precision, axis = -1) out = symiirorder_nd(symiirorder2, out, r, omega, precision = precision, axis = 0) return xp.asarray(out)", - "docstring": "Coefficients for 2-D cubic (3rd order) B-spline. Return the third-order B-spline coefficients over a regularly spaced input grid for the two-dimensional input image. Parameters ---------- input : ndarray The input signal. lamb : float Specifies the amount of smoothing in the transfer function. precision : float Specifies the precision for computing the infinite sum needed to apply mirror-symmetric boundary conditions. Returns ------- output : ndarray The filtered signal.", - "type": "function", - "file_path": "scipy\\scipy\\signal\\_spline_filters.py", - "ast_data": "FunctionDef name:cspline2d arguments arg:signal arg:lamb arg:precision Assign Call call:array_namespace Assign Call call:asarray If BoolOp Compare op:Lt Compare op:GtE If Compare op:In Assign Assign If Compare op:LtE Assign Assign Call call:symiirorder_nd Assign Call call:symiirorder_nd Return return:yes Assign Call call:compute_root_from_lambda Assign Call call:symiirorder_nd Assign Call call:symiirorder_nd Return return:yes" - }, - { - "library": "pytorch", - "name": "wait_for_broadcasts", - "source_code": "def wait_for_broadcasts(self) -> None: assert len(self.broadcast_handles) = = self.num_bucket_assignments, f'Missing at least one broadcast handle on rank {dist.get_rank()}' _ = [x.wait() for x in self.broadcast_handles] self.broadcast_handles.clear()", - "docstring": "Wait for all parameter broadcasts. 
This function should be called once all broadcasts have been scheduled, meaning `` in preparation for the next iteration.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\optim\\zero_redundancy_optimizer.py", - "ast_data": "FunctionDef name:wait_for_broadcasts arguments arg:self Assign" - }, - { - "library": "pytorch", - "name": "squash_mask", - "source_code": "def squash_mask(self, attach_sparsify_hook = True, **kwargs): for name, configs in self.data_groups.items(): configs['hook'].remove() configs.pop('hook') self.data_groups[name]['hook_state'] = 'None' if attach_sparsify_hook: configs['hook'] = configs['layer'].register_forward_pre_hook(self._sparsify_hook(name)) configs['hook_state'] = 'sparsify'", - "docstring": "Unregisters aggregate hook that was applied earlier and registers sparsification hooks if attach_sparsify_hook = True.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", - "ast_data": "FunctionDef name:squash_mask arguments arg:self arg:attach_sparsify_hook kwarg:kwargs For Call call:items Assign If Assign Call call:register_forward_pre_hook Assign" - }, - { - "library": "coconut", - "name": "set_grammar_names", - "source_code": "@staticmethod def set_grammar_names(): for varname, val in vars(Grammar).items(): if hasattr(val, 'setName'): val.setName(varname)", - "docstring": "Set names of grammar elements to their variable names.", - "type": "method", - "file_path": "coconut\\coconut\\compiler\\grammar.py", - "ast_data": "FunctionDef name:set_grammar_names arguments For Call call:items If Call call:hasattr" - }, - { - "library": "matplotlib", - "name": "get_named_colors_mapping", - "source_code": "def get_named_colors_mapping(): return _colors_full_map", - "docstring": "Return the global mapping of names to named colors.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:get_named_colors_mapping arguments Return return:yes" - }, - { - "library": "cherrypy", - "name": "index", - "source_code": "@cherrypy.expose def index(self): return 'We have an important message for you!'", - "docstring": "Produce HTTP response body of hello world app index URI.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\tutorial\\tut02_expose_methods.py", - "ast_data": "FunctionDef name:index arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "support", - "source_code": "def support(self, *args, **kwargs): args, loc, scale = self._parse_args(*args, **kwargs) arrs = np.broadcast_arrays(*args, loc, scale) args, loc, scale = (arrs[: -2], arrs[-2], arrs[-1]) cond = self._argcheck(*args) & (scale > 0) _a, _b = self._get_support(*args) if cond.all(): return (_a * scale + loc, _b * scale + loc) elif cond.ndim = = 0: return (self.badvalue, self.badvalue) _a, _b = (np.asarray(_a).astype('d'), np.asarray(_b).astype('d')) out_a, out_b = (_a * scale + loc, _b * scale + loc) place(out_a, 1 - cond, self.badvalue) place(out_b, 1 - cond, self.badvalue) return (out_a, out_b)", - "docstring": "Support of the distribution. Parameters ---------- arg1, arg2, ... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information). loc : array_like, optional location parameter, Default is 0. scale : array_like, optional scale parameter, Default is 1. 
Returns ------- a, b : array_like end-points of the distribution's support.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", - "ast_data": "FunctionDef name:support arguments arg:self vararg:args kwarg:kwargs Assign Call call:_parse_args Assign Call call:broadcast_arrays Assign Assign Assign Call call:_get_support If Call call:all Return return:yes If Compare op:Eq Return return:yes Assign Assign Return return:yes" - }, - { - "library": "mongo", - "name": "release", - "source_code": "def release(self) -> None: if self._locked: self._locked = False self._wake_up_first() else: raise RuntimeError('Lock is not acquired')", - "docstring": "Release a lock. When the lock is locked, reset it to unlocked, and return. If any other tasks are blocked waiting for the lock to become unlocked, allow exactly one of them to proceed. When invoked on an unlocked lock, a RuntimeError is raised. There is no return value.", - "type": "method", - "file_path": "mongo\\pymongo\\_asyncio_lock.py", - "ast_data": "FunctionDef name:release arguments arg:self If Assign Raise raises:RuntimeError('Lock is not acquired')" - }, - { - "library": "tensorflow", - "name": "get_model_hash", - "source_code": "def get_model_hash(model): hash_value = 0 for subgraph in model.subgraphs: if subgraph.operators is not None: hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.operators)) for operator in subgraph.operators: if operator.inputs is not None: hash_value = update_hash_with_array(hash_value, operator.inputs) if operator.outputs is not None: hash_value = update_hash_with_array(hash_value, operator.outputs) if subgraph.tensors is not None: hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.tensors)) for tensor in subgraph.tensors: if tensor.buffer is not None: buffer = model.buffers[tensor.buffer] if buffer.data is not None: hash_value = update_hash_with_primitive_value(hash_value, len(buffer.data)) if tensor.shape is not None: hash_value = update_hash_with_array(hash_value, tensor.shape) if subgraph.inputs is not None: hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.inputs)) if subgraph.outputs is not None: hash_value = update_hash_with_primitive_value(hash_value, len(subgraph.outputs)) return hash_value", - "docstring": "Calculate a 64-bit integer hash for a TensorFlow Lite model based on its structure. Args: model: A TensorFlow Lite model object. 
Returns: int: A 64-bit integer hash value representing the model structure.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", - "ast_data": "FunctionDef name:get_model_hash arguments arg:model Assign For If Compare op:IsNot Assign Call call:update_hash_with_primitive_value For If Compare op:IsNot Assign Call call:update_hash_with_array If Compare op:IsNot Assign Call call:update_hash_with_array If Compare op:IsNot Assign Call call:update_hash_with_primitive_value For If Compare op:IsNot Assign If Compare op:IsNot Assign Call call:update_hash_with_primitive_value If Compare op:IsNot Assign Call call:update_hash_with_array If Compare op:IsNot Assign Call call:update_hash_with_primitive_value If Compare op:IsNot Assign Call call:update_hash_with_primitive_value Return return:yes" - }, - { - "library": "tensorflow", - "name": "UnconnectedGradients", - "source_code": "@tf_export('UnconnectedGradients') class UnconnectedGradients(enum.Enum): NONE = 'none' ZERO = 'zero'", - "docstring": "Controls how gradient computation behaves when y does not depend on x. The gradient of y with respect to x can be zero in two different ways: there could be no differentiable path in the graph connecting x to y (and so we can statically prove that the gradient is zero) or it could be that runtime values of tensors in a particular execution lead to a gradient of zero (say, if a relu unit happens to not be activated). To allow you to distinguish between these two cases you can choose what value gets returned for the gradient when there is no path in the graph from x to y: * : Indicates that [None] will be returned if there is no path from x to y * : Indicates that a zero tensor will be returned in the shape of x.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\ops\\unconnected_gradients.py", - "ast_data": "ClassDef name:UnconnectedGradients Call call:tf_export Assign Assign" - }, - { - "library": "mongo", - "name": "reset", - "source_code": "def reset(self) -> TopologyDescription: if self._topology_type = = TOPOLOGY_TYPE.ReplicaSetWithPrimary: topology_type = TOPOLOGY_TYPE.ReplicaSetNoPrimary else: topology_type = self._topology_type sds = {address: ServerDescription(address) for address in self._server_descriptions} return TopologyDescription(topology_type, sds, self._replica_set_name, self._max_set_version, self._max_election_id, self._topology_settings)", - "docstring": "A copy of this description, with all servers marked Unknown.", - "type": "method", - "file_path": "mongo\\pymongo\\topology_description.py", - "ast_data": "FunctionDef name:reset arguments arg:self If Compare op:Eq Assign Assign Assign Return return:yes" - }, - { - "library": "scipy", - "name": "kolmogn", - "source_code": "def kolmogn(n, x, cdf = True): it = np.nditer([n, x, cdf, None], flags = ['zerosize_ok'], op_dtypes = [None, np.float64, np.bool_, np.float64]) for _n, _x, _cdf, z in it: if np.isnan(_n): z[...] = _n continue if int(_n) ! = _n: raise ValueError(f'n is not integral: {_n}') z[...] = _kolmogn(int(_n), _x, cdf = _cdf) result = it.operands[-1] return result", - "docstring": "Computes the CDF for the two-sided Kolmogorov-Smirnov distribution. The two-sided Kolmogorov-Smirnov distribution has as its CDF Pr(D_n <= x), for a sample of size n drawn from a distribution with CDF F(t), where :math:, and :math: is the Empirical Cumulative Distribution Function of the sample. 
Parameters ---------- n : integer, array_like the number of samples x : float, array_like The K-S statistic, float between 0 and 1 cdf : bool, optional whether to compute the CDF(default=true) or the SF. Returns ------- cdf : ndarray CDF (or SF it cdf is False) at the specified locations. The return value has shape the result of numpy broadcasting n and x.", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_ksstats.py", - "ast_data": "FunctionDef name:kolmogn arguments arg:n arg:x arg:cdf Assign Call call:nditer For If Call call:isnan Assign If Compare op:NotEq Raise raises:ValueError(f'n is not integral: {_n}') Assign Call call:_kolmogn Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "coef_", - "source_code": "@property def coef_(self): if self.kernel ! = 'linear': raise AttributeError('coef_ is only available when using a linear kernel') coef = self._get_coef() if sp.issparse(coef): coef.data.flags.writeable = False else: coef.flags.writeable = False return coef", - "docstring": "Weights assigned to the features when . Returns ------- ndarray of shape (n_features, n_classes)", - "type": "method", - "file_path": "scikit-learn\\sklearn\\svm\\_base.py", - "ast_data": "FunctionDef name:coef_ arguments arg:self If Compare op:NotEq Raise raises:AttributeError('coef_ is only available when using a linear kernel') Assign Call call:_get_coef If Call call:issparse Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, kernel_name: str, runtime_arg_info: list['ArgInfo'], runtime_arg_values: list[Any]) -> None: super().__init__() self.kernel_name = kernel_name self.runtime_arg_info = runtime_arg_info self.runtime_arg_values = runtime_arg_values", - "docstring": "Initializes a new instance of the CUDATemplateKernel class. Args: kernel_name (str): The name of the kernel.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:kernel_name type:str arg:runtime_arg_info type:list['ArgInfo'] arg:runtime_arg_values type:list[Any] Assign Assign Assign" - }, - { - "library": "django", - "name": "get_list_display_links", - "source_code": "def get_list_display_links(self, request, list_display): if self.list_display_links or self.list_display_links is None or (not list_display): return self.list_display_links else: return list(list_display)[: 1]", - "docstring": "Return a sequence containing the fields to be displayed as links on the changelist. 
The list_display parameter is the list of fields returned by get_list_display().", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\options.py", - "ast_data": "FunctionDef name:get_list_display_links arguments arg:self arg:request arg:list_display If BoolOp Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "add_real_datasets", - "source_code": "def add_real_datasets(self, datasets, other_datasets, cat_feature2cats, ranking = False): if other_datasets: for name, path in other_datasets: df_other, choices, _, _, _ = self.get_df(path, cat_feature2cats = cat_feature2cats, apply_filters = False, add_near_best = ranking) datasets[name] = df_other", - "docstring": "Adds datasets specified by the user to the datasets dictionary.", - "type": "method", - "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", - "ast_data": "FunctionDef name:add_real_datasets arguments arg:self arg:datasets arg:other_datasets arg:cat_feature2cats arg:ranking If For Assign Call call:get_df Assign" - }, - { - "library": "pytorch", - "name": "build", - "source_code": "@staticmethod def build(rank: int, store: Store, local_addr: Optional[str], server_port: Optional[int] = None) -> 'RendezvousStoreInfo': if rank = = 0: addr = local_addr or socket.getfqdn() port = server_port or get_free_port() store.set(RendezvousStoreInfo.MASTER_ADDR_KEY, addr.encode(encoding = 'UTF-8')) store.set(RendezvousStoreInfo.MASTER_PORT_KEY, str(port).encode(encoding = 'UTF-8')) addr = store.get(RendezvousStoreInfo.MASTER_ADDR_KEY).decode(encoding = 'UTF-8') port = int(store.get(RendezvousStoreInfo.MASTER_PORT_KEY).decode(encoding = 'UTF-8')) return RendezvousStoreInfo(master_addr = addr, master_port = port)", - "docstring": "Factory method, finds unused new port on rank0 host and addr/port info with all ranks. If master_addr/master_port is knowns (useful when sharing existing tcp store server) use the constructor. Args: rank: rank of the current node store: store to use for rendezvous local_addr: address of the current node, if not provided will be resolved from hostname server_port: port of the TCPStore server, when the TCPStore is shared.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", - "ast_data": "FunctionDef name:build arguments arg:rank type:int arg:store type:Store arg:local_addr type:Optional[str] arg:server_port type:Optional[int] If Compare op:Eq Assign BoolOp Call call:getfqdn Assign BoolOp Call call:get_free_port Assign Call call:decode Assign Call call:int Return return:yes" - }, - { - "library": "tensorflow", - "name": "calibrate", - "source_code": "@convert_phase(Component.OPTIMIZE_TFLITE_MODEL, SubComponent.CALIBRATE) def calibrate(self, dataset_gen): self._feed_tensors(dataset_gen, resize_input = True) return self._calibrator.Calibrate()", - "docstring": "Calibrates the model with specified generator. Returns: A model with min and max calibration stats. 
Args: dataset_gen: A generator that generates calibration samples.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\optimize\\calibrator.py", - "ast_data": "FunctionDef name:calibrate arguments arg:self arg:dataset_gen Call call:convert_phase Return return:yes" - }, - { - "library": "mongo", - "name": "SessionOptions", - "source_code": "class SessionOptions: def __init__(self, causal_consistency: Optional[bool] = None, default_transaction_options: Optional[TransactionOptions] = None, snapshot: Optional[bool] = False) -> None: if snapshot: if causal_consistency: raise ConfigurationError('snapshot reads do not support causal_consistency = True') causal_consistency = False elif causal_consistency is None: causal_consistency = True self._causal_consistency = causal_consistency if default_transaction_options is not None: if not isinstance(default_transaction_options, TransactionOptions): raise TypeError('default_transaction_options must be an instance of pymongo.client_session.TransactionOptions, not: {!r}'.format(default_transaction_options)) self._default_transaction_options = default_transaction_options self._snapshot = snapshot @property def causal_consistency(self) -> bool: return self._causal_consistency @property def default_transaction_options(self) -> Optional[TransactionOptions]: return self._default_transaction_options @property def snapshot(self) -> Optional[bool]: return self._snapshot", - "docstring": "Options for a new :class:. :param causal_consistency: If True, read operations are causally ordered within the session. Defaults to True when the `` parameter.", - "type": "class", - "file_path": "mongo\\pymongo\\synchronous\\client_session.py", - "ast_data": "ClassDef name:SessionOptions FunctionDef name:__init__ arguments arg:self arg:causal_consistency type:Optional[bool] arg:default_transaction_options type:Optional[TransactionOptions] arg:snapshot type:Optional[bool] If If Raise raises:ConfigurationError('snapshot reads do not support causal_consistency=True') Assign If Compare op:Is Assign Assign If Compare op:IsNot If Raise raises:TypeError('default_transaction_options must be an instance of pymongo.client_session.TransactionOptions, not: {!r}'.format(default_transaction_options)) Assign Assign FunctionDef name:causal_consistency arguments arg:self Return return:yes FunctionDef name:default_transaction_options arguments arg:self Return return:yes FunctionDef name:snapshot arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "__call__", - "source_code": "def __call__(self, direction, factor, values): if self._fallback_formatter: fallback_strings = self._fallback_formatter(direction, factor, values) else: fallback_strings = [''] * len(values) return [self._format_dict.get(k, v) for k, v in zip(values, fallback_strings)]", - "docstring": "factor is ignored if value is found in the dictionary", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\grid_finder.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:direction arg:factor arg:values If Assign Call call:_fallback_formatter Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "visit_ImportFrom", - "source_code": "def visit_ImportFrom(self, node): if not node.module: self.generic_visit(node) return from_import = node.module for import_alias in node.names: full_module_name = '%s.%s' % (from_import, import_alias.name) full_import = (full_module_name, import_alias.asname) detection = 
self._api_analysis_spec.imports_to_detect.get(full_import, None) if detection: self.add_result(detection) self.add_log(detection.log_level, node.lineno, node.col_offset, detection.log_message) self.generic_visit(node)", - "docstring": "Handle visiting an import-from node in the AST. Args: node: Current Node", - "type": "method", - "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", - "ast_data": "FunctionDef name:visit_ImportFrom arguments arg:self arg:node If Return return:no Assign For Assign Assign Assign Call call:get If" - }, - { - "library": "pytorch", - "name": "set_materialize_grads", - "source_code": "def set_materialize_grads(self, value: bool): self.materialize_grads = value", - "docstring": "Set whether to materialize grad tensors. Default is `setup_contextforwardbackwardjvp` methods. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) >>> class SimpleFunc(Function): >>> @staticmethod >>> def forward(ctx, x): >>> return x.clone(), x.clone() >>> >>> @staticmethod >>> @once_differentiable >>> def backward(ctx, g1, g2): >>> return g1 + g2 # No check for None necessary >>> >>> # We modify SimpleFunc to handle non-materialized grad outputs >>> class Func(Function): >>> @staticmethod >>> def forward(ctx, x): >>> ctx.set_materialize_grads(False) >>> ctx.save_for_backward(x) >>> return x.clone(), x.clone() >>> >>> @staticmethod >>> @once_differentiable >>> def backward(ctx, g1, g2): >>> x, = ctx.saved_tensors >>> grad_input = torch.zeros_like(x) >>> if g1 is not None: # We must check for None now >>> grad_input += g1 >>> if g2 is not None: >>> grad_input += g2 >>> return grad_input >>> >>> a = torch.tensor(1., requires_grad=True) >>> b, _ = Func.apply(a) # induces g2 to be undefined", - "type": "method", - "file_path": "pytorch\\torch\\autograd\\function.py", - "ast_data": "FunctionDef name:set_materialize_grads arguments arg:self arg:value type:bool Assign" - }, - { - "library": "matplotlib", - "name": "set_parse_math", - "source_code": "def set_parse_math(self, parse_math): self._parse_math = bool(parse_math)", - "docstring": "Override switch to disable any mathtext parsing for this . Parameters ---------- parse_math : bool If False, this will never use mathtext. If True, mathtext will be used if there is an even number of unescaped dollar signs.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:set_parse_math arguments arg:self arg:parse_math Assign Call call:bool" - }, - { - "library": "numpy", - "name": "param_parse", - "source_code": "def param_parse(d, params): if '(' in d: dname = d[: d.find('(')] ddims = d[d.find('(') + 1: d.rfind(')')] index = int(param_parse(ddims, params)) return str(params[dname][index]) elif d in params: return str(params[d]) else: for p in params: re_1 = re.compile('(?P.*?)\\\\b' + p + '\\\\b(?P.*)', re.I) m = re_1.match(d) while m: d = m.group('before') + str(params[p]) + m.group('after') m = re_1.match(d) return d", - "docstring": "Recursively parse array dimensions. Parses the declaration of an array variable or parameter keyword, and is called recursively if the dimension for this array is a previously defined parameter (found in ). Parameters ---------- d : str Fortran expression describing the dimension of an array. params : dict Previously parsed parameters declared in the Fortran source file. Returns ------- out : str Parsed dimension expression. 
Examples -------- * If the line being analyzed is then and we return immediately, with >>> d = '2' >>> param_parse(d, params) 2 * If the line being analyzed is then ; since is a previously parsed parameter, and , we call recursively, to obtain >>> d = 'pa' >>> params = {'pa': 3} >>> param_parse(d, params) 3 * If the line being analyzed is then ; since is a previously parsed parameter, and , we call recursively, to obtain >>> d = 'pa(1)' >>> params = dict(pa={1: 3, 2: 5}) >>> param_parse(d, params) 3", - "type": "function", - "file_path": "numpy\\numpy\\f2py\\crackfortran.py", - "ast_data": "FunctionDef name:param_parse arguments arg:d arg:params If Compare op:In Assign Assign Assign Call call:int Return return:yes If Compare op:In Return return:yes For Assign Call call:compile Assign Call call:match While Assign Assign Call call:match Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_feature_names_out", - "source_code": "def get_feature_names_out(self, input_features = None): feature_names_out = input_features for _, name, transform in self._iter(): if not hasattr(transform, 'get_feature_names_out'): raise AttributeError('Estimator {} does not provide get_feature_names_out. Did you mean to call pipeline[: -1].get_feature_names_out()?'.format(name)) feature_names_out = transform.get_feature_names_out(feature_names_out) return feature_names_out", - "docstring": "Get output feature names for transformation. Transform input features using the pipeline. Parameters ---------- input_features : array-like of str or None, default=None Input features. Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\pipeline.py", - "ast_data": "FunctionDef name:get_feature_names_out arguments arg:self arg:input_features Assign For Call call:_iter If Raise raises:AttributeError('Estimator {} does not provide get_feature_names_out. Did you mean to call pipeline[:-1].get_feature_names_out()?'.format(name)) Assign Call call:get_feature_names_out Return return:yes" - }, - { - "library": "pytorch", - "name": "is_scripting", - "source_code": "def is_scripting() -> bool: return False", - "docstring": "Function that returns True when in compilation and False otherwise. This is useful especially with the @unused decorator to leave code in your model that is not yet TorchScript compatible. .. testcode:: import torch @torch.jit.unused def unsupported_linear_op(x): return x def linear(x): if torch.jit.is_scripting(): return torch.linear(x) else: return unsupported_linear_op(x)", - "type": "function", - "file_path": "pytorch\\torch\\_jit_internal.py", - "ast_data": "FunctionDef name:is_scripting arguments Return return:yes" - }, - { - "library": "tensorflow", - "name": "warmup", - "source_code": "def warmup(): start('') stop(save = False)", - "docstring": "Warm-up the profiler session. The profiler session will set up profiling context, including loading CUPTI library for GPU profiling. 
This is used for improving the accuracy of the profiling results.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\profiler\\profiler_v2.py", - "ast_data": "FunctionDef name:warmup arguments" - }, - { - "library": "matplotlib", - "name": "draggable", - "source_code": "def draggable(self, state = None, use_blit = False): from matplotlib.offsetbox import DraggableAnnotation is_draggable = self._draggable is not None if state is None: state = not is_draggable if state: if self._draggable is None: self._draggable = DraggableAnnotation(self, use_blit) else: if self._draggable is not None: self._draggable.disconnect() self._draggable = None return self._draggable", - "docstring": "Set whether the annotation is draggable with the mouse. Parameters ---------- state : bool or None - True or False: set the draggability. - None: toggle the draggability. use_blit : bool, default: False Use blitting for faster image composition. For details see :ref:. Returns ------- DraggableAnnotation or None If the annotation is draggable, the corresponding helper is returned.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:draggable arguments arg:self arg:state arg:use_blit Assign Compare op:IsNot If Compare op:Is Assign If If Compare op:Is Assign Call call:DraggableAnnotation If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "pygame", - "name": "add_internal", - "source_code": "def add_internal(self, sprite, layer = None): self.spritedict[sprite] = self._init_rect if layer is None: try: layer = sprite.layer except AttributeError: layer = self._default_layer setattr(sprite, '_layer', layer) elif hasattr(sprite, '_layer'): setattr(sprite, '_layer', layer) sprites = self._spritelist sprites_layers = self._spritelayers sprites_layers[sprite] = layer leng = len(sprites) low = mid = 0 high = leng - 1 while low < = high: mid = low + (high - low) // 2 if sprites_layers[sprites[mid]] < = layer: low = mid + 1 else: high = mid - 1 while mid < leng and sprites_layers[sprites[mid]] < = layer: mid + = 1 sprites.insert(mid, sprite)", - "docstring": "Do not use this method directly. 
It is used by the group to add a sprite internally.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:add_internal arguments arg:self arg:sprite arg:layer Assign If Compare op:Is Try Assign ExceptHandler Assign If Call call:hasattr Assign Assign Assign Assign Call call:len Assign Assign While Compare op:LtE Assign If Compare op:LtE Assign Assign While BoolOp Compare op:Lt Compare op:LtE" - }, - { - "library": "pytorch", - "name": "get_annotation_str", - "source_code": "def get_annotation_str(annotation): if isinstance(annotation, ast.Name): return annotation.id elif isinstance(annotation, ast.Attribute): return '.'.join([get_annotation_str(annotation.value), annotation.attr]) elif isinstance(annotation, ast.Subscript): subscript_slice = annotation.slice return f'{get_annotation_str(annotation.value)}[{get_annotation_str(subscript_slice)}]' elif isinstance(annotation, ast.Tuple): return ', '.join([get_annotation_str(elt) for elt in annotation.elts]) elif isinstance(annotation, ast.Constant): return f'{annotation.value}' return None", - "docstring": "Convert an AST node containing a type annotation to the string present in the source that represents the same annotation.", - "type": "function", - "file_path": "pytorch\\torch\\_jit_internal.py", - "ast_data": "FunctionDef name:get_annotation_str arguments arg:annotation If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Assign Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "reset_refcounts", - "source_code": "def reset_refcounts(self, to_counts): for alias, cur_refcount in self.alias_refcount.copy().items(): unref_amount = cur_refcount - to_counts.get(alias, 0) self.unref_alias(alias, unref_amount)", - "docstring": "Reset reference counts for aliases so that they match the value passed in .", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\query.py", - "ast_data": "FunctionDef name:reset_refcounts arguments arg:self arg:to_counts For Call call:items Assign" - }, - { - "library": "scikit-learn", - "name": "partial_fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def partial_fit(self, X, y): if not hasattr(self, 'coef_'): self._more_validate_params(for_partial_fit = True) lr = 'pa1' if self.loss = = 'epsilon_insensitive' else 'pa2' return self._partial_fit(X, y, alpha = 1.0, C = self.C, loss = 'epsilon_insensitive', learning_rate = lr, max_iter = 1, sample_weight = None, coef_init = None, intercept_init = None)", - "docstring": "Fit linear model with Passive Aggressive algorithm. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Subset of training data. y : numpy array of shape [n_samples] Subset of target values. 
Returns ------- self : object Fitted estimator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\linear_model\\_passive_aggressive.py", - "ast_data": "FunctionDef name:partial_fit arguments arg:self arg:X arg:y Call call:_fit_context If Assign Return return:yes" - }, - { - "library": "django", - "name": "module_to_dict", - "source_code": "def module_to_dict(module, omittable = lambda k: k.startswith('_') or not k.isupper()): return {k: repr(getattr(module, k)) for k in dir(module) if not omittable(k)}", - "docstring": "Convert a module namespace to a Python dictionary.", - "type": "function", - "file_path": "django\\django\\core\\management\\commands\\diffsettings.py", - "ast_data": "FunctionDef name:module_to_dict arguments arg:module arg:omittable Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https: //github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once = True) def __init__(self, df, loc, scale, validate_args = False, allow_nan_stats = True, name = 'StudentT'): parameters = dict(locals()) with ops.name_scope(name, values = [df, loc, scale]) as name: with ops.control_dependencies([check_ops.assert_positive(df)] if validate_args else []): self._df = array_ops.identity(df, name = 'df') self._loc = array_ops.identity(loc, name = 'loc') self._scale = array_ops.identity(scale, name = 'scale') check_ops.assert_same_float_dtype((self._df, self._loc, self._scale)) super(StudentT, self).__init__(dtype = self._scale.dtype, reparameterization_type = distribution.FULLY_REPARAMETERIZED, validate_args = validate_args, allow_nan_stats = allow_nan_stats, parameters = parameters, graph_parents = [self._df, self._loc, self._scale], name = name)", - "docstring": "Construct Student's t distributions. The distributions have degree of freedom , mean , and scale . The parameters , , and must be shaped in a way that supports broadcasting (e.g. is a valid operation). Args: df: Floating-point . The degrees of freedom of the distribution(s). must contain only positive values. loc: Floating-point . The mean(s) of the distribution(s). scale: Floating-point . The scaling factor(s) for the distribution(s). Note that is not technically the standard deviation of this distribution but has semantics more similar to standard deviation than variance. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. 
Raises: TypeError: if loc and scale are different dtypes.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\student_t.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:df arg:loc arg:scale arg:validate_args arg:allow_nan_stats arg:name Call call:deprecated Assign Call call:dict With With Assign Call call:identity Assign Call call:identity Assign Call call:identity" - }, - { - "library": "scipy", - "name": "nnz", - "source_code": "@property def nnz(self) -> int: return self._getnnz()", - "docstring": "Number of stored values, including explicit zeros. See also -------- count_nonzero : Number of non-zero entries", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_base.py", - "ast_data": "FunctionDef name:nnz arguments arg:self Return return:yes" - }, - { - "library": "scrapy", - "name": "curl_to_request_kwargs", - "source_code": "def curl_to_request_kwargs(curl_command: str, ignore_unknown_options: bool = True) -> dict[str, Any]: curl_args = split(curl_command) if curl_args[0] ! = 'curl': raise ValueError('A curl command must start with \"curl\"') parsed_args, argv = curl_parser.parse_known_args(curl_args[1:]) if argv: msg = f'Unrecognized options: {', '.join(argv)}' if ignore_unknown_options: warnings.warn(msg) else: raise ValueError(msg) url = parsed_args.url parsed_url = urlparse(url) if not parsed_url.scheme: url = 'http: //' + url method = parsed_args.method or 'GET' result: dict[str, Any] = {'method': method.upper(), 'url': url} headers, cookies = _parse_headers_and_cookies(parsed_args) if headers: result['headers'] = headers if cookies: result['cookies'] = cookies if parsed_args.data: result['body'] = parsed_args.data if not parsed_args.method: result['method'] = 'POST' return result", - "docstring": "Convert a cURL command syntax to Request kwargs. :param str curl_command: string containing the curl command :param bool ignore_unknown_options: If true, only a warning is emitted when cURL options are unknown. Otherwise raises an error. (default: True) :return: dictionary of Request kwargs", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\curl.py", - "ast_data": "FunctionDef name:curl_to_request_kwargs arguments arg:curl_command type:str arg:ignore_unknown_options type:bool Assign Call call:split If Compare op:NotEq Raise raises:ValueError('A curl command must start with \"curl\"') Assign Call call:parse_known_args If Assign If Raise raises:ValueError(msg) Assign Assign Call call:urlparse If Assign Assign BoolOp Assign Call call:_parse_headers_and_cookies If Assign If Assign If Assign If Assign Return return:yes" - }, - { - "library": "numpy", - "name": "add_subpackage", - "source_code": "def add_subpackage(self, subpackage_name, subpackage_path = None, standalone = False): if standalone: parent_name = None else: parent_name = self.name config_list = self.get_subpackage(subpackage_name, subpackage_path, parent_name = parent_name, caller_level = 2) if not config_list: self.warn('No configuration returned, assuming unavailable.') for config in config_list: d = config if isinstance(config, Configuration): d = config.todict() assert isinstance(d, dict), repr(type(d)) self.info('Appending %s configuration to %s' % (d.get('name'), self.name)) self.dict_append(**d) dist = self.get_distribution() if dist is not None: self.warn('distutils distribution has been initialized, it may be too late to add a subpackage ' + subpackage_name)", - "docstring": "Add a sub-package to the current Configuration instance. 
This is useful in a setup.py script for adding sub-packages to a package. Parameters ---------- subpackage_name : str name of the subpackage subpackage_path : str if given, the subpackage path such as the subpackage is in subpackage_path / subpackage_name. If None,the subpackage is assumed to be located in the local path / subpackage_name. standalone : bool", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\misc_util.py", - "ast_data": "FunctionDef name:add_subpackage arguments arg:self arg:subpackage_name arg:subpackage_path arg:standalone If Assign Assign Assign Call call:get_subpackage If For Assign If Call call:isinstance Assign Call call:todict Assign Call call:get_distribution If Compare op:IsNot" - }, - { - "library": "pytorch", - "name": "preview_type_promotion", - "source_code": "@abc.abstractmethod def preview_type_promotion(self, args: tuple, kwargs: dict) -> TypePromotionSnapshot: ...", - "docstring": "Preview type promotion results for provided set of args and kwargs. Returns a TypePromotionSnapshot object that contains the promoted dtypes for the arguments and the expected output dtype.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", - "ast_data": "FunctionDef name:preview_type_promotion arguments arg:self arg:args type:tuple arg:kwargs type:dict" - }, - { - "library": "matplotlib", - "name": "transformed", - "source_code": "def transformed(self, transform): return Path(transform.transform(self.vertices), self.codes, self._interpolation_steps)", - "docstring": "Return a transformed copy of the path. See Also -------- matplotlib.transforms.TransformedPath A specialized path class that will cache the transformed result and automatically update when the transform changes.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\path.py", - "ast_data": "FunctionDef name:transformed arguments arg:self arg:transform Return return:yes" - }, - { - "library": "tensorflow", - "name": "reduce", - "source_code": "def reduce(self, fn, *args): assert not context.executing_eagerly() tensor_specs = [] for arg in args: if not isinstance(arg, tensor_lib.Tensor): raise ValueError(f'Got a non-Tensor argument {arg} in reduce.') batched_shape = tensor_shape.TensorShape([self._maybe_iters]).concatenate(arg.shape) tensor_specs.append(tensor_lib.TensorSpec(shape = batched_shape, dtype = arg.dtype)) concrete_function = def_function.function(fn).get_concrete_function(*tensor_specs) pl_outputs = [] with ops.control_dependencies(args): for output in concrete_function.outputs: if not isinstance(output, tensor_lib.Tensor): raise ValueError(f'Got a non-Tensor output {output} while running reduce.') if output.shape.is_fully_defined(): dummy = array_ops.zeros(output.shape.as_list(), dtype = output.dtype) pl_outputs.append(array_ops.placeholder_with_default(dummy, shape = output.shape)) else: pl_outputs.append(array_ops.placeholder(output.dtype, shape = output.shape)) reduction_op = array_ops.identity_n(pl_outputs)[0].op self._reduce_map[reduction_op] = (concrete_function, args) if len(reduction_op.outputs) = = 1: return reduction_op.outputs[0] else: return tuple(reduction_op.outputs)", - "docstring": "Performs reduction on vectorized across pfor iterations. Note that is traced once inside the loop function context. Hence any captures or side-effects will happen in that context. Call to the traced version of happens during the construction of the vectorized code. Note that this currently may not work inside a control flow construct. 
Args: fn: a reduction function. It will be called with arguments that have the same structure as *args but with individual values whose rank may be higher by 1 since they represent loop invariant vectorized versions of the corresponding Tensors in *args. *args: unvectorized Tensors. Returns: The result of running on the vectorized versions of . These outputs will be available as loop invariant values to all the iterations.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", - "ast_data": "FunctionDef name:reduce arguments arg:self arg:fn vararg:args Assign For If Raise raises:ValueError(f'Got a non-Tensor argument {arg} in reduce.') Assign Call call:concatenate Assign Call call:get_concrete_function Assign With For If Raise raises:ValueError(f'Got a non-Tensor output {output} while running reduce.') If Call call:is_fully_defined Assign Call call:zeros Assign Assign If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "setitem", - "source_code": "def setitem(self, indexer, value) -> Block: value = self._standardize_fill_value(value) values = cast(np.ndarray, self.values) if self.ndim = = 2: values = values.T check_setitem_lengths(indexer, value, values) if self.dtype ! = _dtype_obj: value = extract_array(value, extract_numpy = True) try: casted = np_can_hold_element(values.dtype, value) except LossySetitemError: nb = self.coerce_to_target_dtype(value, raise_on_upcast = True) return nb.setitem(indexer, value) else: if self.dtype = = _dtype_obj: vi = values[indexer] if lib.is_list_like(vi): casted = setitem_datetimelike_compat(values, len(vi), casted) self = self._maybe_copy(inplace = True) values = cast(np.ndarray, self.values.T) if isinstance(casted, np.ndarray) and casted.ndim = = 1 and (len(casted) = = 1): casted = casted[0, ...] try: values[indexer] = casted except (TypeError, ValueError) as err: if is_list_like(casted): raise ValueError('setting an array element with a sequence.') from err raise return self", - "docstring": "Attempt self.values[indexer] = value, possibly creating a new array. Parameters ---------- indexer : tuple, list-like, array-like, slice, int The subset of self.values to set value : object The value being set Returns ------- Block Notes ----- is a direct slice/positional indexer. must be a compatible shape.", - "type": "method", - "file_path": "pandas\\pandas\\core\\internals\\blocks.py", - "ast_data": "FunctionDef name:setitem arguments arg:self arg:indexer arg:value Assign Call call:_standardize_fill_value Assign Call call:cast If Compare op:Eq Assign If Compare op:NotEq Assign Call call:extract_array Try Assign Call call:np_can_hold_element ExceptHandler Assign Call call:coerce_to_target_dtype Return return:yes If Compare op:Eq Assign If Call call:is_list_like Assign Call call:setitem_datetimelike_compat Assign Call call:_maybe_copy Assign Call call:cast If BoolOp Call call:isinstance Compare op:Eq Compare op:Eq Assign Try Assign ExceptHandler If Call call:is_list_like Raise raises:ValueError('setting an array element with a sequence.') Raise Return return:yes" - }, - { - "library": "sphinx", - "name": "init", - "source_code": "def init(self, builder: Builder, theme: Theme | None = None, dirs: list[str] | None = None) -> None: msg = 'must be implemented in subclasses' raise NotImplementedError(msg)", - "docstring": "Called by the builder to initialize the template system. 
*builder* is the builder object; you'll probably want to look at the value of `sphinx.theming.Theme` object or None; in the latter case, *dirs* can be list of fixed directories to look for templates.", - "type": "method", - "file_path": "sphinx\\sphinx\\application.py", - "ast_data": "FunctionDef name:init arguments arg:self arg:builder type:Builder arg:theme type:Theme | None arg:dirs type:list[str] | None Assign Raise raises:NotImplementedError(msg)" - }, - { - "library": "pandas", - "name": "is_dtype", - "source_code": "@classmethod def is_dtype(cls, dtype: object) -> bool: dtype = getattr(dtype, 'dtype', dtype) if isinstance(dtype, (ABCSeries, ABCIndex, ABCDataFrame, np.dtype)): return False elif dtype is None: return False elif isinstance(dtype, cls): return True if isinstance(dtype, str): try: return cls.construct_from_string(dtype) is not None except TypeError: return False return False", - "docstring": "Check if we match 'dtype'. Parameters ---------- dtype : object The object to check. Returns ------- bool Notes ----- The default implementation is True if 1. ``.", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\base.py", - "ast_data": "FunctionDef name:is_dtype arguments arg:cls arg:dtype type:object Assign Call call:getattr If Call call:isinstance Return return:yes If Compare op:Is Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Try Return return:yes ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "staged_predict_proba", - "source_code": "def staged_predict_proba(self, X): for raw_predictions in self._staged_raw_predict(X): yield self._loss.predict_proba(raw_predictions)", - "docstring": "Predict class probabilities at each iteration. This method allows monitoring (i.e. determine error on testing set) after each stage. Parameters ---------- X : array-like of shape (n_samples, n_features) The input samples. Yields ------ y : generator of ndarray of shape (n_samples,) The predicted class probabilities of the input samples, for each iteration.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\gradient_boosting.py", - "ast_data": "FunctionDef name:staged_predict_proba arguments arg:self arg:X For Call call:_staged_raw_predict" - }, - { - "library": "scrapy", - "name": "maxpriority", - "source_code": "def maxpriority(self) -> int: if len(self) > 0: return max((cast(int, self.getpriority(name)) for name in self)) return get_settings_priority('default')", - "docstring": "Return the numerical value of the highest priority present throughout all settings, or the numerical value for `~scrapy.settings.SETTINGS_PRIORITIES` if there are no settings stored.", - "type": "method", - "file_path": "scrapy\\scrapy\\settings\\__init__.py", - "ast_data": "FunctionDef name:maxpriority arguments arg:self If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "setup", - "source_code": "@abc.abstractmethod def setup(self, fig, outfile, dpi = None): Path(outfile).parent.resolve(strict = True) self.outfile = outfile self.fig = fig if dpi is None: dpi = self.fig.dpi self.dpi = dpi", - "docstring": "Setup for writing the movie file. Parameters ---------- fig : The figure object that contains the information for frames. outfile : str The filename of the resulting movie file. dpi : float, default: `` The DPI (or resolution) for the file. 
This controls the size in pixels of the resulting movie file.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\animation.py", - "ast_data": "FunctionDef name:setup arguments arg:self arg:fig arg:outfile arg:dpi Assign Assign If Compare op:Is Assign Assign" - }, - { - "library": "pytorch", - "name": "filename", - "source_code": "@property def filename(self) -> _Optional[str]: return self._untyped_storage.filename", - "docstring": "Returns the file name associated with this storage if the storage was memory mapped from a file. or `` if the storage was not created by memory mapping a file.", - "type": "method", - "file_path": "pytorch\\torch\\storage.py", - "ast_data": "FunctionDef name:filename arguments arg:self Return return:yes" - }, - { - "library": "cherrypy", - "name": "httpserver_from_self", - "source_code": "def httpserver_from_self(self, httpserver = None): if httpserver is None: httpserver = self.instance if httpserver is None: from cherrypy import _cpwsgi_server httpserver = _cpwsgi_server.CPWSGIServer(self) if isinstance(httpserver, text_or_bytes): httpserver = attributes(httpserver)(self) return (httpserver, self.bind_addr)", - "docstring": "Return a (httpserver, bind_addr) pair based on self attributes.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpserver.py", - "ast_data": "FunctionDef name:httpserver_from_self arguments arg:self arg:httpserver If Compare op:Is Assign If Compare op:Is Assign Call call:CPWSGIServer If Call call:isinstance Assign Call Return return:yes" - }, - { - "library": "tensorflow", - "name": "WhileCondFuncGraph", - "source_code": "class WhileCondFuncGraph(ControlFlowFuncGraph): pass", - "docstring": "FuncGraph for the condition of tf.while_loop(). This is used to distinguish while conditions from other functions.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_v2_func_graphs.py", - "ast_data": "ClassDef name:WhileCondFuncGraph" - }, - { - "library": "pytorch", - "name": "track_inputs", - "source_code": "def track_inputs(self, inputs: tuple[Any, ...]) -> None: def _track_inputs(t: torch.Tensor) -> None: self._update_and_maybe_create_winfos(t, _FSDPRefType.INP) tree_map_only(torch.Tensor, _track_inputs, inputs)", - "docstring": "This is used to track the input tensors to the model and annotate them as ``. Args: inputs (Tuple[Any]): A tuple containing the input data. This can include tensors as well as other data types. Only tensors will be tracked.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_tools\\fsdp2_mem_tracker.py", - "ast_data": "FunctionDef name:track_inputs arguments arg:self arg:inputs type:tuple[Any, ...] FunctionDef name:_track_inputs arguments arg:t type:torch.Tensor" - }, - { - "library": "pandas", - "name": "MultiIndexPyIntEngine", - "source_code": "class MultiIndexPyIntEngine(libindex.BaseMultiIndexCodesEngine, libindex.ObjectEngine): _base = libindex.ObjectEngine _codes_dtype = 'object'", - "docstring": "Manages a MultiIndex by mapping label combinations to positive integers. 
This class manages those (extreme) cases in which the number of possible label combinations overflows the 64 bits integers, and uses an ObjectEngine containing Python integers.", - "type": "class", - "file_path": "pandas\\pandas\\core\\indexes\\multi.py", - "ast_data": "ClassDef name:MultiIndexPyIntEngine Assign Assign" - }, - { - "library": "matplotlib", - "name": "stop_filter", - "source_code": "def stop_filter(self, post_processing): orig_img = np.asarray(self.buffer_rgba()) slice_y, slice_x = cbook._get_nonzero_slices(orig_img[..., 3]) cropped_img = orig_img[slice_y, slice_x] self._renderer = self._filter_renderers.pop() self._update_methods() if cropped_img.size: img, ox, oy = post_processing(cropped_img / 255, self.dpi) gc = self.new_gc() if img.dtype.kind = = 'f': img = np.asarray(img * 255.0, np.uint8) self._renderer.draw_image(gc, slice_x.start + ox, int(self.height) - slice_y.stop + oy, img[: : -1])", - "docstring": "Save the current canvas as an image and apply post processing. The *post_processing* function:: def post_processing(image, dpi): # ny, nx, depth = image.shape # image (numpy array) has RGBA channels and has a depth of 4. ... # create a new_image (numpy array of 4 channels, size can be # different). The resulting image may have offsets from # lower-left corner of the original image return new_image, offset_x, offset_y The saved renderer is restored and the returned image from post_processing is plotted (using draw_image) on it.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py", - "ast_data": "FunctionDef name:stop_filter arguments arg:self arg:post_processing Assign Call call:asarray Assign Call call:_get_nonzero_slices Assign Assign Call call:pop If Assign Call call:post_processing Assign Call call:new_gc If Compare op:Eq Assign Call call:asarray" - }, - { - "library": "pytorch", - "name": "restore_global_state", - "source_code": "@contextlib.contextmanager def restore_global_state(self): prior_global_state = self.tracing_context.global_context.copy_graphstate() current_global_state: dict[str, tuple[Any, bool]] = {} self.save_global_state(out = current_global_state) try: self.tracing_context.global_context.restore_graphstate(prior_global_state) yield finally: self.tracing_context.global_context.restore_graphstate(GlobalContextCheckpointState(current_global_state))", - "docstring": "Momentarily restores the global state to what it was prior to tracing the current output", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\output_graph.py", - "ast_data": "FunctionDef name:restore_global_state arguments arg:self Assign Call call:copy_graphstate Try" - }, - { - "library": "pytorch", - "name": "OutputAdaptStep", - "source_code": "@runtime_checkable class OutputAdaptStep(Protocol): def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None = None) -> Any: ...", - "docstring": "A protocol that defines a step in the output adapting process. The output adapting process is a sequence of steps that are applied to the PyTorch model outputs to transform them into the outputs format produced by the exported ONNX model. Each step takes the PyTorch model outputs as arguments and returns the transformed outputs. 
This serves as a base formalized construct for the transformation done to model output signature by any individual component in the exporter.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", - "ast_data": "ClassDef name:OutputAdaptStep FunctionDef name:apply arguments arg:self arg:model_outputs type:Any arg:model type:torch.nn.Module | Callable | torch_export.ExportedProgram | None" - }, - { - "library": "salmon", - "name": "START", - "source_code": "@route_like(handlers.log.START) @stateless @nolocking def START(message, to = None, host = None): logging.debug('MESSAGE to %s@%s added to queue.', to, host) q = queue.Queue('run/queue') q.push(message)", - "docstring": "@stateless and routes however handlers.log.START routes (everything). Has @nolocking, but that's alright since it's just writing to a Maildir.", - "type": "function", - "file_path": "salmon\\salmon\\handlers\\queue.py", - "ast_data": "FunctionDef name:START arguments arg:message arg:to arg:host Call call:route_like Assign Call call:Queue" - }, - { - "library": "django", - "name": "get_date_list", - "source_code": "def get_date_list(self, queryset, date_type = None, ordering = 'ASC'): date_field = self.get_date_field() allow_empty = self.get_allow_empty() if date_type is None: date_type = self.get_date_list_period() if self.uses_datetime_field: date_list = queryset.datetimes(date_field, date_type, ordering) else: date_list = queryset.dates(date_field, date_type, ordering) if date_list is not None and (not date_list) and (not allow_empty): raise Http404(_('No %(verbose_name_plural)s available') % {'verbose_name_plural': queryset.model._meta.verbose_name_plural}) return date_list", - "docstring": "Get a date list by calling , checking along the way for empty lists that aren't allowed.", - "type": "method", - "file_path": "django\\django\\views\\generic\\dates.py", - "ast_data": "FunctionDef name:get_date_list arguments arg:self arg:queryset arg:date_type arg:ordering Assign Call call:get_date_field Assign Call call:get_allow_empty If Compare op:Is Assign Call call:get_date_list_period If Assign Call call:datetimes Assign Call call:dates If BoolOp Compare op:IsNot Raise raises:Http404(_('No %(verbose_name_plural)s available') % {'verbose_name_plural': queryset.model._meta.verbose_name_plural}) Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_ref", - "source_code": "def is_ref(x): return isinstance(x, variables_module.Variable) or (isinstance(x, module.Module) and hasattr(x, 'dtype') and hasattr(x, 'shape'))", - "docstring": "Evaluates if the object has reference semantics. An object is deemed \"reference\" if it is a instance or is derived from a with and properties. Args: x: Any object. 
Returns: is_ref: Python indicating input is has nonreference semantics, i.e., is a or a with and properties.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py", - "ast_data": "FunctionDef name:is_ref arguments arg:x Return return:yes" - }, - { - "library": "scipy", - "name": "from_cython", - "source_code": "@classmethod def from_cython(cls, module, name, user_data = None, signature = None): try: function = module.__pyx_capi__[name] except AttributeError as e: message = 'Given module is not a Cython module with __pyx_capi__ attribute' raise ValueError(message) from e except KeyError as e: message = f'No function {name!r} found in __pyx_capi__ of the module' raise ValueError(message) from e return cls(function, user_data, signature)", - "docstring": "Create a low-level callback function from an exported Cython function. Parameters ---------- module : module Cython module where the exported function resides name : str Name of the exported function user_data : {PyCapsule, ctypes void pointer, cffi void pointer}, optional User data to pass on to the callback function. signature : str, optional Signature of the function. If omitted, determined from *function*.", - "type": "method", - "file_path": "scipy\\scipy\\_lib\\_ccallback.py", - "ast_data": "FunctionDef name:from_cython arguments arg:cls arg:module arg:name arg:user_data arg:signature Try Assign ExceptHandler Assign Raise raises:ValueError(message) ExceptHandler Assign Raise raises:ValueError(message) Return return:yes" - }, - { - "library": "tensorflow", - "name": "seekable", - "source_code": "def seekable(self): return True", - "docstring": "Returns True as FileIO supports random access ops of seek()/tell()", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:seekable arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "friendly_debug_info", - "source_code": "def friendly_debug_info(v: object) -> Argument: if isinstance(v, torch.Tensor): return f'Tensor({v.shape}, grad = {v.requires_grad}, dtype = {v.dtype})' else: return str(v)", - "docstring": "Helper function to print out debug info in a friendly way.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\pipelining\\_debug.py", - "ast_data": "FunctionDef name:friendly_debug_info arguments arg:v type:object If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "from_row_starts", - "source_code": "@classmethod def from_row_starts(cls, row_starts, nvals, validate = True, dtype = None, dtype_hint = None): if not isinstance(validate, bool): raise TypeError('validate must have type bool') with ops.name_scope(None, 'RowPartitionFromRowStarts', [row_starts]): row_starts = cls._convert_row_partition(row_starts, 'row_starts', dtype_hint = dtype_hint, dtype = dtype) row_starts.shape.assert_has_rank(1) nvals = math_ops.cast(nvals, row_starts.dtype) if validate: msg = 'Arguments to from_row_starts do not form a valid RaggedTensor' checks = [check_ops.assert_rank(row_starts, 1, message = msg), _assert_zero(row_starts[: 1], message = msg), _assert_monotonic_increasing(row_starts, message = msg), check_ops.assert_less_equal(row_starts[-1:], nvals, message = msg)] row_starts = control_flow_ops.with_dependencies(checks, row_starts) row_splits = array_ops.concat([row_starts, [nvals]], axis = 0) return cls(row_splits = row_splits, nvals = nvals, internal = 
_row_partition_factory_key)", - "docstring": "Creates a with rows partitioned by . Equivalent to: . Args: row_starts: A 1-D integer tensor with shape . Must be nonnegative and sorted in ascending order. If , then must be zero. nvals: A scalar tensor indicating the number of values. validate: If true, then use assertions to check that the arguments form a valid . dtype: Optional dtype for the RowPartition. If missing, the type is inferred from the type of , dtype_hint, or tf.int64. dtype_hint: Optional dtype for the RowPartition, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to is not possible, this argument has no effect. Returns: A .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", - "ast_data": "FunctionDef name:from_row_starts arguments arg:cls arg:row_starts arg:nvals arg:validate arg:dtype arg:dtype_hint If Raise raises:TypeError('validate must have type bool') With Assign Call call:_convert_row_partition Assign Call call:cast If Assign Assign Assign Call call:with_dependencies Assign Call call:concat Return return:yes" - }, - { - "library": "pandas", - "name": "MergeError", - "source_code": "class MergeError(ValueError): pass", - "docstring": "Exception raised when merging data. Subclass of ``. See Also -------- DataFrame.join : For joining DataFrames on their indexes. merge : For merging two DataFrames on a common set of keys. Examples -------- >>> left = pd.DataFrame( ... {\"a\": [\"a\", \"b\", \"b\", \"d\"], \"b\": [\"cat\", \"dog\", \"weasel\", \"horse\"]}, ... index=range(4), ... ) >>> right = pd.DataFrame( ... {\"a\": [\"a\", \"b\", \"c\", \"d\"], \"c\": [\"meow\", \"bark\", \"chirp\", \"nay\"]}, ... index=range(4), ... ).set_index(\"a\") >>> left.join( ... right, ... on=\"a\", ... validate=\"one_to_one\", ... 
) Traceback (most recent call last): MergeError: Merge keys are not unique in left dataset; not a one-to-one merge", - "type": "class", - "file_path": "pandas\\pandas\\errors\\__init__.py", - "ast_data": "ClassDef name:MergeError" - }, - { - "library": "scipy", - "name": "__new__", - "source_code": "def __new__(cls, *system, **kwargs): if len(system) = = 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_zpk() if cls is ZerosPolesGain: if kwargs.get('dt') is None: return ZerosPolesGainContinuous.__new__(ZerosPolesGainContinuous, *system, **kwargs) else: return ZerosPolesGainDiscrete.__new__(ZerosPolesGainDiscrete, *system, **kwargs) return super().__new__(cls)", - "docstring": "Handle object conversion if input is an instance of", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:__new__ arguments arg:cls vararg:system kwarg:kwargs If BoolOp Compare op:Eq Call call:isinstance Return return:yes If Compare op:Is If Compare op:Is Return return:yes Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "variable_capturing_scope", - "source_code": "def variable_capturing_scope(next_creator, **kwds): enable_variable_lifting = kwds.get('experimental_enable_variable_lifting') if enable_variable_lifting is None: enable_variable_lifting = True if not enable_variable_lifting: return next_creator(**kwds) v = UnliftedInitializerVariable(add_initializers_to = add_initializers_to, **kwds) created_variables.append(weakref.ref(v)) return v", - "docstring": "Creates UnliftedInitializerVariables and saves references to them.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", - "ast_data": "FunctionDef name:variable_capturing_scope arguments arg:next_creator kwarg:kwds Assign Call call:get If Compare op:Is Assign If Return return:yes Assign Call call:UnliftedInitializerVariable Return return:yes" - }, - { - "library": "django", - "name": "touch", - "source_code": "def touch(self, key, timeout = DEFAULT_TIMEOUT, version = None): raise NotImplementedError('subclasses of BaseCache must provide a touch() method')", - "docstring": "Update the key's expiry time using timeout. Return True if successful or False if the key does not exist.", - "type": "method", - "file_path": "django\\django\\core\\cache\\backends\\base.py", - "ast_data": "FunctionDef name:touch arguments arg:self arg:key arg:timeout arg:version Raise raises:NotImplementedError('subclasses of BaseCache must provide a touch() method')" - }, - { - "library": "matplotlib", - "name": "get_height_char", - "source_code": "def get_height_char(self, c, isord = False): if not isord: c = ord(c) return self._metrics[c].bbox[-1]", - "docstring": "Get the bounding box (ink) height of character *c* (space is 0).", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", - "ast_data": "FunctionDef name:get_height_char arguments arg:self arg:c arg:isord If Assign Call call:ord Return return:yes" - }, - { - "library": "pytorch", - "name": "mark", - "source_code": "def mark(msg): return _nvtx.markA(msg)", - "docstring": "Describe an instantaneous event that occurred at some point. 
Args: msg (str): ASCII message to associate with the event.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\nvtx.py", - "ast_data": "FunctionDef name:mark arguments arg:msg Return return:yes" - }, - { - "library": "django", - "name": "can_fast_delete", - "source_code": "def can_fast_delete(self, *args, **kwargs): return False", - "docstring": "We always want to load the objects into memory so that we can display them to the user in confirm page.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\utils.py", - "ast_data": "FunctionDef name:can_fast_delete arguments arg:self vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "pytorch", - "name": "check_module_version_greater_or_equal", - "source_code": "def check_module_version_greater_or_equal(module, req_version_tuple, error_if_malformed = True): try: version_strs = module.__version__.split('.') module_version = tuple((type(req_field)(version_strs[idx]) for idx, req_field in enumerate(req_version_tuple))) requirement_is_met = module_version > = req_version_tuple except Exception as e: message = f\"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared with tuple {str(req_version_tuple)}\" if error_if_malformed: raise RuntimeError(message) from e else: warnings.warn(message + ', but continuing assuming that requirement is met') requirement_is_met = True return requirement_is_met", - "docstring": "Check if a module's version satisfies requirements Usually, a module's version string will be like 'x.y.z', which would be represented as a tuple (x, y, z), but sometimes it could be an unexpected format. If the version string does not match the given tuple's format up to the length of the tuple, then error and exit or emit a warning. Args: module: the module to check the version of req_version_tuple: tuple (usually of ints) representing the required version error_if_malformed: whether we should exit if module version string is malformed Returns: requirement_is_met: bool", - "type": "function", - "file_path": "pytorch\\torch\\serialization.py", - "ast_data": "FunctionDef name:check_module_version_greater_or_equal arguments arg:module arg:req_version_tuple arg:error_if_malformed Try Assign Call call:split Assign Call call:tuple Assign Compare op:GtE ExceptHandler Assign If Raise raises:RuntimeError(message) Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "register_user_hooks", - "source_code": "def register_user_hooks(self, pre_fw_hook: Optional[Callable] = None, post_fw_hook: Optional[Callable] = None, pre_bw_hook: Optional[Callable] = None, post_bw_hook: Optional[Callable] = None): def set_hook(hook, user_hook, hook_name): if hook is not None and user_hook is not None: raise AssertionError(f'Only one {hook_name} can be registered at a time Clear the existing hook by calling ``clear_user_hooks`` before registering a new one') return hook self._user_pre_fw_hook = set_hook(pre_fw_hook, self._user_pre_fw_hook, 'pre_fw_hook') self._user_post_fw_hook = set_hook(post_fw_hook, self._user_post_fw_hook, 'post_fw_hook') self._user_pre_bw_hook = set_hook(pre_bw_hook, self._user_pre_bw_hook, 'pre_bw_hook') self._user_post_bw_hook = set_hook(post_bw_hook, self._user_post_bw_hook, 'post_bw_hook')", - "docstring": "Registers user-specified hooks to be called before/after the forward/backward pass for each module tracked by the `` attribute when each of the hooks is called. 
Hooks are intended to be used as markers only not to modify the inputs/outputs.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_tools\\mod_tracker.py", - "ast_data": "FunctionDef name:register_user_hooks arguments arg:self arg:pre_fw_hook type:Optional[Callable] arg:post_fw_hook type:Optional[Callable] arg:pre_bw_hook type:Optional[Callable] arg:post_bw_hook type:Optional[Callable] FunctionDef name:set_hook arguments arg:hook arg:user_hook arg:hook_name If BoolOp Compare op:IsNot Compare op:IsNot Raise raises:AssertionError(f'Only one {hook_name} can be registered at a time Clear the existing hook by calling ``clear_user_hooks`` before registering a new one') Return return:yes Assign Call call:set_hook Assign Call call:set_hook Assign Call call:set_hook Assign Call call:set_hook" - }, - { - "library": "pandas", - "name": "iget_values", - "source_code": "def iget_values(self, i: int) -> ArrayLike: block = self.blocks[self.blknos[i]] values = block.iget(self.blklocs[i]) return values", - "docstring": "Return the data for column i as the values (ndarray or ExtensionArray). Warning! The returned array is a view but doesn't handle Copy-on-Write, so this should be used with caution.", - "type": "method", - "file_path": "pandas\\pandas\\core\\internals\\managers.py", - "ast_data": "FunctionDef name:iget_values arguments arg:self arg:i type:int Assign Assign Call call:iget Return return:yes" - }, - { - "library": "django", - "name": "get_related_models_tuples", - "source_code": "def get_related_models_tuples(model): return {(rel_mod._meta.app_label, rel_mod._meta.model_name) for rel_mod in _get_related_models(model)}", - "docstring": "Return a list of typical (app_label, model_name) tuples for all related models for the given model.", - "type": "function", - "file_path": "django\\django\\db\\migrations\\state.py", - "ast_data": "FunctionDef name:get_related_models_tuples arguments arg:model Return return:yes" - }, - { - "library": "tensorflow", - "name": "configure_tpu_version", - "source_code": "def configure_tpu_version(self, version, restart_type = 'always'): def configure_worker(worker): ip_address = worker['ipAddress'] url = (_VERSION_SWITCHER_ENDPOINT + '/{}?restartType = {}').format(ip_address, version, restart_type) req = urllib.request.Request(url, data = b'') try: urllib.request.urlopen(req) except urllib.error.HTTPError as e: status_code = e.code if status_code = = 404: raise Exception('Tensorflow version {} is not available on Cloud TPU, try a previous nightly version or refer to https: //cloud.google.com/tpu/docs/release-notes for the latest official version.'.format(version)) else: raise Exception('Failed to configure worker {}'.format(ip_address)) workers = self.network_endpoints() with futures.ThreadPoolExecutor(max_workers = len(workers)) as executor: results = executor.map(configure_worker, workers) for result in results: if result: result.result()", - "docstring": "Configure TPU software version. Args: version (string): Version of software to configure the TPU with. restart_type (string): Restart behaviour when switching versions, defaults to always restart. 
Options are 'always', 'ifNeeded'.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py", - "ast_data": "FunctionDef name:configure_tpu_version arguments arg:self arg:version arg:restart_type FunctionDef name:configure_worker arguments arg:worker Assign Assign Call call:format Assign Call call:Request Try ExceptHandler Assign If Compare op:Eq Raise raises:Exception('Tensorflow version {} is not available on Cloud TPU, try a previous nightly version or refer to https://cloud.google.com/tpu/docs/release-notes for the latest official version.'.format(version)) Raise raises:Exception('Failed to configure worker {}'.format(ip_address)) Assign Call call:network_endpoints With Assign Call call:map For If" - }, - { - "library": "numpy", - "name": "copyto", - "source_code": "@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto) def copyto(dst, src, casting = None, where = None): return (dst, src, where)", - "docstring": "copyto(dst, src, casting='same_kind', where=True) Copies values from one array to another, broadcasting as necessary. Raises a TypeError if the rule is violated, and if is provided, it selects which elements to copy. Parameters ---------- dst : ndarray The array into which values are copied. src : array_like The array from which values are copied. casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional Controls what kind of data casting may occur when copying. * 'no' means the data types should not be cast at all. * 'equiv' means only byte-order changes are allowed. * 'safe' means only casts which can preserve values are allowed. * 'same_kind' means only safe casts or casts within a kind, like float64 to float32, are allowed. * 'unsafe' means any data conversions may be done. where : array_like of bool, optional A boolean array which is broadcasted to match the dimensions of , and selects elements to copy from to wherever it contains the value True. 
Examples -------- >>> import numpy as np >>> A = np.array([4, 5, 6]) >>> B = [1, 2, 3] >>> np.copyto(A, B) >>> A array([1, 2, 3]) >>> A = np.array([[1, 2, 3], [4, 5, 6]]) >>> B = [[4, 5, 6], [7, 8, 9]] >>> np.copyto(A, B) >>> A array([[4, 5, 6], [7, 8, 9]])", - "type": "function", - "file_path": "numpy\\numpy\\_core\\multiarray.py", - "ast_data": "FunctionDef name:copyto arguments arg:dst arg:src arg:casting arg:where Call call:array_function_from_c_func_and_dispatcher Return return:yes" - }, - { - "library": "pytorch", - "name": "named_buffers", - "source_code": "@compatibility(is_backward_compatible = False) def named_buffers(self) -> Iterator[tuple[str, torch.Tensor]]: non_persistent_buffers = set(self.graph_signature.non_persistent_buffers) for buffer_name in self.graph_signature.buffers: if buffer_name in non_persistent_buffers: yield (buffer_name, self.constants[buffer_name]) else: yield (buffer_name, self.state_dict[buffer_name])", - "docstring": "Returns an iterator over original module buffers, yielding both the name of the buffer as well as the buffer itself.", - "type": "method", - "file_path": "pytorch\\torch\\export\\exported_program.py", - "ast_data": "FunctionDef name:named_buffers arguments arg:self Call call:compatibility Assign Call call:set For If Compare op:In" - }, - { - "library": "authlib", - "name": "validate_revocation_endpoint", - "source_code": "def validate_revocation_endpoint(self): url = self.get('revocation_endpoint') if url and (not is_secure_transport(url)): raise ValueError('\"revocation_endpoint\" MUST use \"https\" scheme')", - "docstring": "OPTIONAL. URL of the authorization server's OAuth 2.0 revocation endpoint [RFC7009].", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", - "ast_data": "FunctionDef name:validate_revocation_endpoint arguments arg:self Assign Call call:get If BoolOp Raise raises:ValueError('\"revocation_endpoint\" MUST use \"https\" scheme')" - }, - { - "library": "scipy", - "name": "Easom", - "source_code": "class Easom(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.global_optimum = [[pi for _ in range(self.N)]] self.fglob = -1.0 def fun(self, x, *args): self.nfev + = 1 a = (x[0] - pi) ** 2 + (x[1] - pi) ** 2 return -cos(x[0]) * cos(x[1]) * exp(-a)", - "docstring": "Easom objective function. This class defines the Easom [1]_ global optimization problem. This is a a multimodal minimization problem defined as follows: .. math:: f_{\\text{Easom}}({x}) = a - \\frac{a}{e^{b \\sqrt{\\frac{\\sum_{i=1}^{n} x_i^{2}}{n}}}} + e - e^{\\frac{\\sum_{i=1}^{n} \\cos\\left(c x_i\\right)} {n}} Where, in this exercise, :math: and :math:. Here, :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO Gavana website disagrees with Jamil, etc. 
Gavana equation in docstring is totally wrong.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py", - "ast_data": "ClassDef name:Easom FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "partitioned_dim_sizes", - "source_code": "@property def partitioned_dim_sizes(self): return self._partitioned_dim_sizes", - "docstring": "The partitioned dimension sizes for this shape. Returns: A of 0-D or 1-D integer .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", - "ast_data": "FunctionDef name:partitioned_dim_sizes arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "__rfloordiv__", - "source_code": "def __rfloordiv__(self, other): return floor_divide(other, self)", - "docstring": "Divide self into other, and return a new masked array.", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:__rfloordiv__ arguments arg:self arg:other Return return:yes" - }, - { - "library": "pytorch", - "name": "combine_two_partitions", - "source_code": "def combine_two_partitions(partition_0: Partition, partition_1: Partition, partitions: list[Partition]) -> None: partition = Partition(len(partitions)) partition.nodes = partition_0.nodes.union(partition_1.nodes) partition.recalculate_mem_size() partitions.append(partition) partitions.remove(partition_0) partitions.remove(partition_1) reorganize_partitions(partitions) return", - "docstring": "Given a list of partitions and its two partitions, combine these two partitions into a new one appending to the partitions and remove the previous two partitions from the list of partitions", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", - "ast_data": "FunctionDef name:combine_two_partitions arguments arg:partition_0 type:Partition arg:partition_1 type:Partition arg:partitions type:list[Partition] Assign Call call:Partition Assign Call call:union Return return:no" - }, - { - "library": "django", - "name": "void_output", - "source_code": "def void_output(func, argtypes, errcheck = True, cpl = False): if argtypes: func.argtypes = argtypes if errcheck: func.restype = c_int func.errcheck = partial(check_errcode, cpl = cpl) else: func.restype = None return func", - "docstring": "For functions that don't only return an error code that needs to be examined.", - "type": "function", - "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py", - "ast_data": "FunctionDef name:void_output arguments arg:func arg:argtypes arg:errcheck arg:cpl If Assign If Assign Assign Call call:partial Assign Return return:yes" - }, - { - "library": "pygame", - "name": "pixels_red", - "source_code": "def pixels_red(surface): return numpy.array(surface.get_view('R'), copy = False)", - "docstring": "pygame.surfarray.pixels_red(Surface): return array Reference pixel red into a 2d array. Create a new 2D array that directly references the red values in a Surface. Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This can only work on 24-bit or 32-bit Surfaces. 
The Surface this array references will remain locked for the lifetime of the array.", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:pixels_red arguments arg:surface Return return:yes" - }, - { - "library": "pytorch", - "name": "load_state_dict", - "source_code": "@deprecated('`load_state_dict` is deprecated and will be removed in future versions. Please use `load` instead.', category = FutureWarning) def load_state_dict(state_dict: dict[str, Any], storage_reader: StorageReader, process_group: Optional[dist.ProcessGroup] = None, coordinator_rank: int = 0, no_dist: bool = False, planner: Optional[LoadPlanner] = None) -> None: storage_reader.reset() with _profile(): return _load_state_dict(state_dict, storage_reader, process_group, coordinator_rank, no_dist, planner)", - "docstring": "This method is deprecated. Please switch to 'load'.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\state_dict_loader.py", - "ast_data": "FunctionDef name:load_state_dict arguments arg:state_dict type:dict[str, Any] arg:storage_reader type:StorageReader arg:process_group type:Optional[dist.ProcessGroup] arg:coordinator_rank type:int arg:no_dist type:bool arg:planner type:Optional[LoadPlanner] Call call:deprecated With Return return:yes" - }, - { - "library": "matplotlib", - "name": "use", - "source_code": "def use(backend, *, force = True): name = rcsetup.validate_backend(backend) if rcParams._get_backend_or_none() = = name: pass else: plt = sys.modules.get('matplotlib.pyplot') if plt is not None: try: plt.switch_backend(name) except ImportError: if force: raise else: rcParams['backend'] = backend rcParams['backend_fallback'] = False", - "docstring": "Select the backend used for rendering and GUI integration. If pyplot is already imported, is used and if the new backend is different than the current backend, all Figures will be closed. Parameters ---------- backend : str The backend to switch to. This can either be one of the standard backend names, which are case-insensitive: - interactive backends: GTK3Agg, GTK3Cairo, GTK4Agg, GTK4Cairo, MacOSX, nbAgg, notebook, QtAgg, QtCairo, TkAgg, TkCairo, WebAgg, WX, WXAgg, WXCairo, Qt5Agg, Qt5Cairo - non-interactive backends: agg, cairo, pdf, pgf, ps, svg, template or a string of the form: `ImportErrorbackends` matplotlib.get_backend matplotlib.pyplot.switch_backend", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", - "ast_data": "FunctionDef name:use arguments arg:backend Assign Call call:validate_backend If Compare op:Eq Assign Call call:get If Compare op:IsNot Try ExceptHandler If Raise Assign Assign" - }, - { - "library": "scipy", - "name": "iterate_all", - "source_code": "def iterate_all(self): if self.disp: logging.info('Splitting first generation') while not self.stop_global: if self.break_routine: break self.iterate() self.stopping_criteria() if not self.minimize_every_iter: if not self.break_routine: self.find_minima() self.res.nit = self.iters_done self.fn = self.HC.V.nfev", - "docstring": "Construct for iterations. If uniform sampling is used, every iteration adds 'n' sampling points. 
Iterations if a stopping criteria (e.g., sampling points or processing time) has been met.", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_shgo.py", - "ast_data": "FunctionDef name:iterate_all arguments arg:self If While If If If Assign Assign" - }, - { - "library": "numpy", - "name": "hermemul", - "source_code": "def hermemul(c1, c2): [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c = c2 xs = c1 else: c = c1 xs = c2 if len(c) = = 1: c0 = c[0] * xs c1 = 0 elif len(c) = = 2: c0 = c[0] * xs c1 = c[1] * xs else: nd = len(c) c0 = c[-2] * xs c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = hermesub(c[-i] * xs, c1 * (nd - 1)) c1 = hermeadd(tmp, hermemulx(c1)) return hermeadd(c0, hermemulx(c1))", - "docstring": "Multiply one Hermite series by another. Returns the product of two Hermite series * . The arguments are sequences of coefficients, from lowest order \"term\" to highest, e.g., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their product. See Also -------- hermeadd, hermesub, hermemulx, hermediv, hermepow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Hermite polynomial basis set. Thus, to express the product as a Hermite series, it is necessary to \"reproject\" the product onto said basis set, which may produce \"unintuitive\" (but correct) results; see Examples section below. Examples -------- >>> from numpy.polynomial.hermite_e import hermemul >>> hermemul([1, 2, 3], [0, 1, 2]) array([14., 15., 28., 7., 6.])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", - "ast_data": "FunctionDef name:hermemul arguments arg:c1 arg:c2 Assign Call call:as_series If Compare op:Gt Assign Assign Assign Assign If Compare op:Eq Assign Assign If Compare op:Eq Assign Assign Assign Call call:len Assign Assign For Call call:range Assign Assign Assign Call call:hermesub Assign Call call:hermeadd Return return:yes" - }, - { - "library": "kornia", - "name": "RgbToRgba", - "source_code": "class RgbToRgba(Module): ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1] ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 4, -1, -1] def __init__(self, alpha_val: Union[float, Tensor]) -> None: super().__init__() self.alpha_val = alpha_val def forward(self, image: Tensor) -> Tensor: return rgb_to_rgba(image, self.alpha_val)", - "docstring": "Convert an image from RGB to RGBA. Add an alpha channel to existing RGB image. Args: alpha_val: A float number for the alpha value or a tensor of shape :math:. Returns: Tensor: RGBA version of the image with shape :math:. Shape: - image: :math: - output: :math: .. note:: The current functionality is NOT supported by Torchscript. Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgba = RgbToRgba(1.) 
>>> output = rgba(input) # 2x4x4x5", - "type": "class", - "file_path": "kornia\\kornia\\color\\rgb.py", - "ast_data": "ClassDef name:RgbToRgba FunctionDef name:__init__ arguments arg:self arg:alpha_val type:Union[float, Tensor] Assign FunctionDef name:forward arguments arg:self arg:image type:Tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "convert_inner_node_data", - "source_code": "def convert_inner_node_data(nested, wrap = False): def _is_serialized_node_data(nested): if isinstance(nested, list) and len(nested) in [3, 4] and isinstance(nested[0], str): return True return False def _is_atomic_nested(nested): if isinstance(nested, ListWrapper): return True if _is_serialized_node_data(nested): return True return not nest.is_nested(nested) def _convert_object_or_list(nested): if wrap: if isinstance(nested, ListWrapper): return nested if _is_serialized_node_data(nested): return ListWrapper(nested) return nested else: if isinstance(nested, ListWrapper): return nested.as_list() return nested return map_structure_with_atomic(_is_atomic_nested, _convert_object_or_list, nested)", - "docstring": "Either wraps or unwraps innermost node data lists in objects. Args: nested: A nested data structure. wrap: If , wrap innermost lists in objects. If , unwraps objects into lists. Returns: Structure of same type as nested, with lists wrapped/unwrapped.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", - "ast_data": "FunctionDef name:convert_inner_node_data arguments arg:nested arg:wrap FunctionDef name:_is_serialized_node_data arguments arg:nested If BoolOp Call call:isinstance Compare op:In Call call:isinstance Return return:yes Return return:yes FunctionDef name:_is_atomic_nested arguments arg:nested If Call call:isinstance Return return:yes If Call call:_is_serialized_node_data Return return:yes Return return:yes FunctionDef name:_convert_object_or_list arguments arg:nested If If Call call:isinstance Return return:yes If Call call:_is_serialized_node_data Return return:yes Return return:yes If Call call:isinstance Return return:yes Return return:yes Return return:yes" - }, - { - "library": "scrapy", - "name": "crawl", - "source_code": "def crawl(self, crawler_or_spidercls: type[Spider] | str | Crawler, *args: Any, **kwargs: Any) -> asyncio.Future[None]: if isinstance(crawler_or_spidercls, Spider): raise ValueError('The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)') if not is_asyncio_reactor_installed(): raise RuntimeError('AsyncCrawlerRunner requires AsyncioSelectorReactor.') crawler = self.create_crawler(crawler_or_spidercls) return self._crawl(crawler, *args, **kwargs)", - "docstring": "Run a crawler with the provided arguments. It will call the given Crawler's :meth: method, while keeping track of it so it can be stopped later. 
If `~scrapy.crawler.Crawler~asyncio.Future~scrapy.crawler.Crawler~scrapy.spiders.Spider` subclass or string :param args: arguments to initialize the spider :param kwargs: keyword arguments to initialize the spider", - "type": "method", - "file_path": "scrapy\\scrapy\\crawler.py", - "ast_data": "FunctionDef name:crawl arguments arg:self arg:crawler_or_spidercls type:type[Spider] | str | Crawler vararg:args kwarg:kwargs If Call call:isinstance Raise raises:ValueError('The crawler_or_spidercls argument cannot be a spider object, it must be a spider class (or a Crawler object)') If Raise raises:RuntimeError('AsyncCrawlerRunner requires AsyncioSelectorReactor.') Assign Call call:create_crawler Return return:yes" - }, - { - "library": "django", - "name": "exists", - "source_code": "def exists(self, session_key): raise NotImplementedError('subclasses of SessionBase must provide an exists() method')", - "docstring": "Return True if the given session_key already exists.", - "type": "method", - "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", - "ast_data": "FunctionDef name:exists arguments arg:self arg:session_key Raise raises:NotImplementedError('subclasses of SessionBase must provide an exists() method')" - }, - { - "library": "pytorch", - "name": "tree_unflatten", - "source_code": "def tree_unflatten(leaves: Iterable[Any], treespec: TreeSpec) -> PyTree: if not _is_pytreespec_instance(treespec): raise TypeError(f'tree_unflatten(leaves, treespec): Expected `treespec` to be instance of PyTreeSpec but got item of type {type(treespec)}.') return optree.tree_unflatten(treespec, leaves)", - "docstring": "Reconstruct a pytree from the treespec and the leaves. The inverse of :func:. >>> tree = {\"b\": (2, [3, 4]), \"a\": 1, \"c\": None, \"d\": 5} >>> leaves, treespec = tree_flatten(tree) >>> tree == tree_unflatten(leaves, treespec) True Args: leaves (iterable): The list of leaves to use for reconstruction. The list must match the number of leaves of the treespec. treespec (TreeSpec): The treespec to reconstruct. Returns: The reconstructed pytree, containing the ``.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py", - "ast_data": "FunctionDef name:tree_unflatten arguments arg:leaves type:Iterable[Any] arg:treespec type:TreeSpec If Raise raises:TypeError(f'tree_unflatten(leaves, treespec): Expected `treespec` to be instance of PyTreeSpec but got item of type {type(treespec)}.') Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, graph, run_metadata): self._graph = graph self._run_metadata = run_metadata self._string_table = StringTable() self._functions = Functions(self._string_table) self._locations = Locations(self._functions)", - "docstring": "Constructor. Args: graph: A instance. run_metadata: A list of objects.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:graph arg:run_metadata Assign Assign Assign Call call:StringTable Assign Call call:Functions Assign Call call:Locations" - }, - { - "library": "tensorflow", - "name": "call_for_each_replica", - "source_code": "def call_for_each_replica(self, fn, args = (), kwargs = None): _require_cross_replica_or_default_context_extended(self) if kwargs is None: kwargs = {} with self._container_strategy().scope(): return self._call_for_each_replica(fn, args, kwargs)", - "docstring": "Run once per replica. 
may call to access methods such as and . is used to communicate between the replicas and re-enter the cross-replica context. All replicas pause their execution having encountered a call. After that the -function is executed. Its results are then unwrapped and given back to each replica call. After that execution resumes until is complete or encounters another . Example: Args: fn: function to run (will be run once per replica). args: Tuple or list with positional arguments for . kwargs: Dict with keyword arguments for . Returns: Merged return value of across all replicas.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", - "ast_data": "FunctionDef name:call_for_each_replica arguments arg:self arg:fn arg:args arg:kwargs If Compare op:Is Assign With Return return:yes" - }, - { - "library": "cryptography", - "name": "extensions", - "source_code": "@property @abc.abstractmethod def extensions(self) -> Extensions: pass", - "docstring": "Returns an Extensions object containing a list of Revoked extensions.", - "type": "method", - "file_path": "cryptography\\src\\cryptography\\x509\\base.py", - "ast_data": "FunctionDef name:extensions arguments arg:self" - }, - { - "library": "matplotlib", - "name": "parse_fontconfig_pattern", - "source_code": "@lru_cache def parse_fontconfig_pattern(pattern): parser = _make_fontconfig_parser() try: parse = parser.parse_string(pattern) except ParseException as err: raise ValueError('\\n' + ParseException.explain(err, 0)) from None parser.reset_cache() props = {} if 'families' in parse: props['family'] = [*map(_family_unescape, parse['families'])] if 'sizes' in parse: props['size'] = [*parse['sizes']] for prop in parse.get('properties', []): if len(prop) = = 1: prop = _CONSTANTS[prop[0]] k, *v = prop props.setdefault(k, []).extend(map(_value_unescape, v)) return props", - "docstring": "Parse a fontconfig *pattern* into a dict that can initialize a object.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\_fontconfig_pattern.py", - "ast_data": "FunctionDef name:parse_fontconfig_pattern arguments arg:pattern Assign Call call:_make_fontconfig_parser Try Assign Call call:parse_string ExceptHandler Raise raises:ValueError('\\n' + ParseException.explain(err, 0)) Assign If Compare op:In Assign If Compare op:In Assign For Call call:get If Compare op:Eq Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "angle_to_rotation_matrix", - "source_code": "def angle_to_rotation_matrix(angle: Tensor) -> Tensor: ang_rad = deg2rad(angle) cos_a: Tensor = cos(ang_rad) sin_a: Tensor = sin(ang_rad) return stack([cos_a, sin_a, -sin_a, cos_a], dim = -1).view(*angle.shape, 2, 2)", - "docstring": "Create a rotation matrix out of angles in degrees. Args: angle: tensor of angles in degrees, any shape :math:. Returns: tensor of rotation matrices with shape :math:. 
Example: >>> input = torch.rand(1, 3) # Nx3 >>> output = angle_to_rotation_matrix(input) # Nx3x2x2", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\conversions.py", - "ast_data": "FunctionDef name:angle_to_rotation_matrix arguments arg:angle type:Tensor Assign Call call:deg2rad Return return:yes" - }, - { - "library": "cryptography", - "name": "public_key", - "source_code": "def public_key(self, key: CertificatePublicKeyTypes) -> CertificateBuilder: if not isinstance(key, (dsa.DSAPublicKey, rsa.RSAPublicKey, ec.EllipticCurvePublicKey, ed25519.Ed25519PublicKey, ed448.Ed448PublicKey, x25519.X25519PublicKey, x448.X448PublicKey)): raise TypeError('Expecting one of DSAPublicKey, RSAPublicKey, EllipticCurvePublicKey, Ed25519PublicKey, Ed448PublicKey, X25519PublicKey, or X448PublicKey.') if self._public_key is not None: raise ValueError('The public key may only be set once.') return CertificateBuilder(self._issuer_name, self._subject_name, key, self._serial_number, self._not_valid_before, self._not_valid_after, self._extensions)", - "docstring": "Sets the requestor's public key (as found in the signing request).", - "type": "method", - "file_path": "cryptography\\src\\cryptography\\x509\\base.py", - "ast_data": "FunctionDef name:public_key arguments arg:self arg:key type:CertificatePublicKeyTypes If Raise raises:TypeError('Expecting one of DSAPublicKey, RSAPublicKey, EllipticCurvePublicKey, Ed25519PublicKey, Ed448PublicKey, X25519PublicKey, or X448PublicKey.') If Compare op:IsNot Raise raises:ValueError('The public key may only be set once.') Return return:yes" - }, - { - "library": "matplotlib", - "name": "print_cycles", - "source_code": "def print_cycles(objects, outstream = sys.stdout, show_progress = False): import gc def print_path(path): for i, step in enumerate(path): next = path[(i + 1) % len(path)] outstream.write(' %s -- ' % type(step)) if isinstance(step, dict): for key, val in step.items(): if val is next: outstream.write(f'[{key!r}]') break if key is next: outstream.write(f'[key] = {val!r}') break elif isinstance(step, list): outstream.write('[%d]' % step.index(next)) elif isinstance(step, tuple): outstream.write('(tuple)') else: outstream.write(repr(step)) outstream.write(' ->\\n') outstream.write('\\n') def recurse(obj, start, all, current_path): if show_progress: outstream.write('%d\\r' % len(all)) all[id(obj)] = None referents = gc.get_referents(obj) for referent in referents: if referent is start: print_path(current_path) elif referent is objects or isinstance(referent, types.FrameType): continue elif id(referent) not in all: recurse(referent, start, all, current_path + [obj]) for obj in objects: outstream.write(f'Examining: {obj!r}\\n') recurse(obj, obj, {}, [])", - "docstring": "Print loops of cyclic references in the given *objects*. It is often useful to pass in `` to find the cycles that are preventing some objects from being garbage collected. Parameters ---------- objects A list of objects to find cycles in. outstream The stream for output. 
show_progress : bool If True, print the number of objects reached as they are found.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", - "ast_data": "FunctionDef name:print_cycles arguments arg:objects arg:outstream arg:show_progress FunctionDef name:print_path arguments arg:path For Call call:enumerate Assign If Call call:isinstance For Call call:items If Compare op:Is If Compare op:Is If Call call:isinstance If Call call:isinstance FunctionDef name:recurse arguments arg:obj arg:start arg:all arg:current_path If Assign Assign Call call:get_referents For If Compare op:Is If BoolOp Compare op:Is Call call:isinstance If Compare op:NotIn For" - }, - { - "library": "pandas", - "name": "after_nearest_workday", - "source_code": "def after_nearest_workday(dt: datetime) -> datetime: return next_workday(nearest_workday(dt))", - "docstring": "returns next workday after nearest workday needed for Boxing day or multiple holidays in a series", - "type": "function", - "file_path": "pandas\\pandas\\tseries\\holiday.py", - "ast_data": "FunctionDef name:after_nearest_workday arguments arg:dt type:datetime Return return:yes" - }, - { - "library": "pytorch", - "name": "str2bool", - "source_code": "def str2bool(v): if v.lower() in ('yes', 'true', 't', 'y', '1'): return True elif v.lower() in ('no', 'false', 'f', 'n', '0'): return False else: raise argparse.ArgumentTypeError('Boolean value expected.')", - "docstring": "ArgumentParser doesn't support type=bool. Thus, this helper method will convert from possible string types to True / False.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\hipify\\hipify_python.py", - "ast_data": "FunctionDef name:str2bool arguments arg:v If Compare op:In Return return:yes If Compare op:In Return return:yes Raise raises:argparse.ArgumentTypeError('Boolean value expected.')" - }, - { - "library": "pytorch", - "name": "set_checkpoint_early_stop", - "source_code": "@contextlib.contextmanager def set_checkpoint_early_stop(enable: bool): global _enable_checkpoint_early_stop try: prev = _enable_checkpoint_early_stop _enable_checkpoint_early_stop = enable yield finally: _enable_checkpoint_early_stop = prev", - "docstring": "Context manager that sets whether checkpoint should stop recomputation early. By default, non-reentrant checkpoint stops recomputation as soon as it has computed all needed Tensors. This context manager can be used to disable that feature if it is problematic for your specific application. This context manager only needs to be active when forward is run. It does not need to be active during backward. Example:: >>> # xdoctest: +SKIP(failing) >>> message = \"saved tensors default hooks are disabled\" >>> with set_checkpoint_early_stop(False): ... # Any checkpoint under this context manager will respect this ... # context manager, even if its backward is performed outside. ... out = checkpoint(fn, inputs) ... 
>>> out.backward()", - "type": "function", - "file_path": "pytorch\\torch\\utils\\checkpoint.py", - "ast_data": "FunctionDef name:set_checkpoint_early_stop arguments arg:enable type:bool Try Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "getsourcefile", - "source_code": "def getsourcefile(object): return _inspect.getsourcefile(tf_decorator.unwrap(object)[1])", - "docstring": "TFDecorator-aware replacement for inspect.getsourcefile.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", - "ast_data": "FunctionDef name:getsourcefile arguments arg:object Return return:yes" - }, - { - "library": "pandas", - "name": "join", - "source_code": "@forbid_nonstring_types(['bytes']) def join(self, sep: str): result = self._data.array._str_join(sep) return self._wrap_result(result)", - "docstring": "Join lists contained as elements in the Series/Index with passed delimiter. If the elements of a Series are lists themselves, join the content of these lists using the delimiter passed to the function. This function is an equivalent to :meth:. Parameters ---------- sep : str Delimiter to use between list entries. Returns ------- Series/Index: object The list entries concatenated by intervening occurrences of the delimiter. Raises ------ AttributeError If the supplied Series contains neither strings nor lists. See Also -------- str.join : Standard library version of this method. Series.str.split : Split strings around given separator/delimiter. Notes ----- If any of the list items is not a string object, the result of the join will be . Examples -------- Example with a list that contains non-string elements. >>> s = pd.Series( ... [ ... [\"lion\", \"elephant\", \"zebra\"], ... [1.1, 2.2, 3.3], ... [\"cat\", np.nan, \"dog\"], ... [\"cow\", 4.5, \"goat\"], ... [\"duck\", [\"swan\", \"fish\"], \"guppy\"], ... ] ... ) >>> s 0 [lion, elephant, zebra] 1 [1.1, 2.2, 3.3] 2 [cat, nan, dog] 3 [cow, 4.5, goat] 4 [duck, [swan, fish], guppy] dtype: object Join all lists using a '-'. The lists containing object(s) of types other than str will produce a NaN. 
>>> s.str.join(\"-\") 0 lion-elephant-zebra 1 NaN 2 NaN 3 NaN 4 NaN dtype: object", - "type": "method", - "file_path": "pandas\\pandas\\core\\strings\\accessor.py", - "ast_data": "FunctionDef name:join arguments arg:self arg:sep type:str Call call:forbid_nonstring_types Assign Call call:_str_join Return return:yes" - }, - { - "library": "pytorch", - "name": "transform_index_select", - "source_code": "@register_transformation_rule(IndexSelect) def transform_index_select(constraint, counter): dims, counter = gen_tensor_dims(constraint.tensor_size, counter) is_valid_index = valid_index(constraint.index, dims) nat_constraints = gen_nat_constraints(dims) if is_valid_index = = T(): new_dims = copy.deepcopy(dims) new_dims[constraint.index] = constraint.dim_replace transformed_constraint = Conj([BinConstraintT(constraint.input_var, TensorType(dims), op_eq), *nat_constraints, is_valid_index, BinConstraintT(constraint.output, TensorType(new_dims), op_eq)]) return (transformed_constraint, counter)", - "docstring": "The constraints consider the given tensor size, checks if the index is valid and if so, generates a constraint for replacing the input dimension with the required dimension", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:transform_index_select arguments arg:constraint arg:counter Call call:register_transformation_rule Assign Call call:gen_tensor_dims Assign Call call:valid_index Assign Call call:gen_nat_constraints If Compare op:Eq Assign Call call:deepcopy Assign Assign Call call:Conj Return return:yes" - }, - { - "library": "scipy", - "name": "gaussian_laplace", - "source_code": "@_ni_docstrings.docfiller def gaussian_laplace(input, sigma, output = None, mode = 'reflect', cval = 0.0, *, axes = None, **kwargs): input = np.asarray(input) def derivative2(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 2 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) axes = _ni_support._check_axes(axes, input.ndim) num_axes = len(axes) sigma = _ni_support._normalize_sequence(sigma, num_axes) if num_axes < input.ndim: sigma_temp = [0] * input.ndim for s, ax in zip(sigma, axes): sigma_temp[ax] = s sigma = sigma_temp return generic_laplace(input, derivative2, output, mode, cval, extra_arguments = (sigma,), extra_keywords = kwargs, axes = axes)", - "docstring": "Multidimensional Laplace filter using Gaussian second derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s axes : tuple of int or None The axes over which to apply the filter. If or tuples are provided, their length must match the number of axes. Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_laplace : ndarray Filtered array. Has the same shape as . 
Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> ascent = datasets.ascent() >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> result = ndimage.gaussian_laplace(ascent, sigma=1) >>> ax1.imshow(result) >>> result = ndimage.gaussian_laplace(ascent, sigma=3) >>> ax2.imshow(result) >>> plt.show()", - "type": "function", - "file_path": "scipy\\scipy\\ndimage\\_filters.py", - "ast_data": "FunctionDef name:gaussian_laplace arguments arg:input arg:sigma arg:output arg:mode arg:cval kwarg:kwargs Assign Call call:asarray FunctionDef name:derivative2 arguments arg:input arg:axis arg:output arg:mode arg:cval arg:sigma kwarg:kwargs Assign Assign Return return:yes Assign Call call:_check_axes Assign Call call:len Assign Call call:_normalize_sequence If Compare op:Lt Assign For Call call:zip Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "SymbolicGridFn", - "source_code": "class SymbolicGridFn: def __init__(self, fn: Callable[..., tuple[Any, Any, Any]]): self.fn = fn self.kwargs_int = {} self.kwargs_sym = {} params = inspect.signature(fn).parameters for name, fn_sym, fn_int in [('cdiv', CeilDiv, ceildiv), ('min', sympy.Min, min), ('max', sympy.Max, max)]: if name in params: self.kwargs_int[name] = fn_int self.kwargs_sym[name] = fn_sym def __call__(self, *args, **kwargs) -> tuple[int, int, int]: return self.fn(*args, **kwargs, **self.kwargs_int) def sympy_call(self, *args, **kwargs): return self.fn(*args, **kwargs, **self.kwargs_sym)", - "docstring": "Wrapper around a grid function that allows either int or sympy inputs. @SymbolicGridFn def grid(x, meta, *, cdiv): return cdiv(x, meta[\"BLOCK_X\"])", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", - "ast_data": "ClassDef name:SymbolicGridFn FunctionDef name:__init__ arguments arg:self arg:fn type:Callable[..., tuple[Any, Any, Any]] Assign Assign Assign Assign For If Compare op:In Assign Assign FunctionDef name:__call__ arguments arg:self vararg:args kwarg:kwargs Return return:yes FunctionDef name:sympy_call arguments arg:self vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "mongo", - "name": "delete_results", - "source_code": "@property def delete_results(self) -> Mapping[int, DeleteResult]: self._raise_if_unacknowledged('delete_results') self._raise_if_not_verbose('delete_results') return cast(Mapping[int, DeleteResult], self.bulk_api_result.get('deleteResults'))", - "docstring": "A map of successful delete operations to their results.", - "type": "method", - "file_path": "mongo\\pymongo\\results.py", - "ast_data": "FunctionDef name:delete_results arguments arg:self Return return:yes" - }, - { - "library": "salmon", - "name": "clear", - "source_code": "def clear(self): while len(self) > 0: self.pop()", - "docstring": "Clears out the contents of the entire queue. Warning: This could be horribly inefficient since it pops messages until the queue is empty. 
It could also cause an infinite loop if another process is writing to messages to the Queue faster than we can pop.", - "type": "method", - "file_path": "salmon\\salmon\\queue.py", - "ast_data": "FunctionDef name:clear arguments arg:self While Compare op:Gt" - }, - { - "library": "django", - "name": "get_max_age", - "source_code": "def get_max_age(response): if not response.has_header('Cache-Control'): return cc = dict((_to_tuple(el) for el in cc_delim_re.split(response.headers['Cache-Control']))) try: return int(cc['max-age']) except (ValueError, TypeError, KeyError): pass", - "docstring": "Return the max-age from the response Cache-Control header as an integer, or None if it wasn't found or wasn't an integer.", - "type": "function", - "file_path": "django\\django\\utils\\cache.py", - "ast_data": "FunctionDef name:get_max_age arguments arg:response If Return return:no Assign Call call:dict Try Return return:yes ExceptHandler" - }, - { - "library": "pytorch", - "name": "rename", - "source_code": "def rename(self, *names, **rename_map): if has_torch_function_unary(self): return handle_torch_function(Tensor.rename, (self,), self, *names, **rename_map) return update_names(self, names, rename_map, inplace = False)", - "docstring": "Renames dimension names of :attr:. There are two main usages: `rename_mapnamesnamesrename_map`. Examples:: >>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W')) >>> renamed_imgs = imgs.rename(N='batch', C='channels') >>> renamed_imgs.names ('batch', 'channels', 'H', 'W') >>> renamed_imgs = imgs.rename(None) >>> renamed_imgs.names (None, None, None, None) >>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width') >>> renamed_imgs.names ('batch', 'channel', 'height', 'width') .. warning:: The named tensor API is experimental and subject to change.", - "type": "method", - "file_path": "pytorch\\torch\\_tensor.py", - "ast_data": "FunctionDef name:rename arguments arg:self vararg:names kwarg:rename_map If Call call:has_torch_function_unary Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "result_ilocs", - "source_code": "@final @cache_readonly def result_ilocs(self) -> npt.NDArray[np.intp]: ids = self.ids if self.has_dropped_na: mask = np.where(ids > = 0) null_gaps = np.cumsum(ids = = -1)[mask] ids = ids[mask] result = get_group_index_sorter(ids, self.ngroups) if self.has_dropped_na: result + = np.take(null_gaps, result) return result", - "docstring": "Get the original integer locations of result_index in the input.", - "type": "method", - "file_path": "pandas\\pandas\\core\\groupby\\ops.py", - "ast_data": "FunctionDef name:result_ilocs arguments arg:self Assign If Assign Call call:where Assign Assign Assign Call call:get_group_index_sorter If Return return:yes" - }, - { - "library": "django", - "name": "do_with", - "source_code": "@register.tag('with') def do_with(parser, token): bits = token.split_contents() remaining_bits = bits[1:] extra_context = token_kwargs(remaining_bits, parser, support_legacy = True) if not extra_context: raise TemplateSyntaxError('%r expected at least one variable assignment' % bits[0]) if remaining_bits: raise TemplateSyntaxError('%r received an invalid token: %r' % (bits[0], remaining_bits[0])) nodelist = parser.parse(('endwith',)) parser.delete_first_token() return WithNode(None, None, nodelist, extra_context = extra_context)", - "docstring": "Add one or more values to the context (inside of this block) for caching and easy access. 
For example:: {% with total=person.some_sql_method %} {{ total }} object{{ total|pluralize }} {% endwith %} Multiple values can be added to the context:: {% with foo=1 bar=2 %} ... {% endwith %} The legacy format of `` is still accepted.", - "type": "function", - "file_path": "django\\django\\template\\defaulttags.py", - "ast_data": "FunctionDef name:do_with arguments arg:parser arg:token Call call:tag Assign Call call:split_contents Assign Assign Call call:token_kwargs If Raise raises:TemplateSyntaxError('%r expected at least one variable assignment' % bits[0]) If Raise raises:TemplateSyntaxError('%r received an invalid token: %r' % (bits[0], remaining_bits[0])) Assign Call call:parse Return return:yes" - }, - { - "library": "matplotlib", - "name": "line_2d_to_3d", - "source_code": "def line_2d_to_3d(line, zs = 0, zdir = 'z', axlim_clip = False): line.__class__ = Line3D line.set_3d_properties(zs, zdir, axlim_clip)", - "docstring": "Convert a to a object. Parameters ---------- zs : float The location along the *zdir* axis in 3D space to position the line. zdir : {'x', 'y', 'z'} Plane to plot line orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide lines with an endpoint outside the axes view limits. .. versionadded:: 3.10", - "type": "function", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", - "ast_data": "FunctionDef name:line_2d_to_3d arguments arg:line arg:zs arg:zdir arg:axlim_clip Assign" - }, - { - "library": "django", - "name": "django_table_names", - "source_code": "def django_table_names(self, only_existing = False, include_views = True): tables = set() for model in self.get_migratable_models(): if not model._meta.managed: continue tables.add(model._meta.db_table) tables.update((f.m2m_db_table() for f in model._meta.local_many_to_many if f.remote_field.through._meta.managed)) tables = list(tables) if only_existing: existing_tables = set(self.table_names(include_views = include_views)) tables = [t for t in tables if self.identifier_converter(t) in existing_tables] return tables", - "docstring": "Return a list of all table names that have associated Django models and are in INSTALLED_APPS. If only_existing is True, include only the tables in the database.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\introspection.py", - "ast_data": "FunctionDef name:django_table_names arguments arg:self arg:only_existing arg:include_views Assign Call call:set For Call call:get_migratable_models If Assign Call call:list If Assign Call call:set Assign Return return:yes" - }, - { - "library": "django", - "name": "Session", - "source_code": "class Session(AbstractBaseSession): objects = SessionManager() @classmethod def get_session_store_class(cls): from django.contrib.sessions.backends.db import SessionStore return SessionStore class Meta(AbstractBaseSession.Meta): db_table = 'django_session'", - "docstring": "Django provides full support for anonymous sessions. The session framework lets you store and retrieve arbitrary data on a per-site-visitor basis. It stores data on the server side and abstracts the sending and receiving of cookies. Cookies contain a session ID -- not the data itself. The Django sessions framework is entirely cookie-based. It does not fall back to putting session IDs in URLs. This is an intentional design decision. Not only does that behavior make URLs ugly, it makes your site vulnerable to session-ID theft via the \"Referer\" header. 
For complete documentation on using Sessions in your code, consult the sessions documentation that is shipped with Django (also available on the Django web site).", - "type": "class", - "file_path": "django\\django\\contrib\\sessions\\models.py", - "ast_data": "ClassDef name:Session Assign Call call:SessionManager FunctionDef name:get_session_store_class arguments arg:cls Return return:yes ClassDef name:Meta Assign" - }, - { - "library": "pandas", - "name": "MultiIndexUInt64Engine", - "source_code": "class MultiIndexUInt64Engine(libindex.BaseMultiIndexCodesEngine, libindex.UInt64Engine): _base = libindex.UInt64Engine _codes_dtype = 'uint64'", - "docstring": "Manages a MultiIndex by mapping label combinations to positive integers. The number of possible label combinations must not overflow the 64 bits integers.", - "type": "class", - "file_path": "pandas\\pandas\\core\\indexes\\multi.py", - "ast_data": "ClassDef name:MultiIndexUInt64Engine Assign Assign" - }, - { - "library": "scipy", - "name": "nllf", - "source_code": "def nllf(self, params = None, data = None): params = params if params is not None else self.params data = data if data is not None else self._data return self._dist.nnlf(theta = params, x = data)", - "docstring": "Negative log-likelihood function Evaluates the negative of the log-likelihood function of the provided data at the provided parameters. Parameters ---------- params : tuple, optional The shape parameters, location, and (if applicable) scale of the distribution as a single tuple. Default is the maximum likelihood estimates (``). data : array_like, optional The data for which the log-likelihood function is to be evaluated. Default is the data to which the distribution was fit. Returns ------- nllf : float The negative of the log-likelihood function.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_fit.py", - "ast_data": "FunctionDef name:nllf arguments arg:self arg:params arg:data Assign Assign Return return:yes" - }, - { - "library": "sphinx", - "name": "new_document", - "source_code": "def new_document(self) -> nodes.document: document = super().new_document() document.transformer = SphinxTransformer(document) document.transformer.set_environment(self.settings.env) reporter = document.reporter document.reporter = LoggingReporter.from_reporter(reporter) return document", - "docstring": "Creates a new document object which has a special reporter object good for logging.", - "type": "method", - "file_path": "sphinx\\sphinx\\io.py", - "ast_data": "FunctionDef name:new_document arguments arg:self Assign Call call:new_document Assign Call call:SphinxTransformer Assign Assign Call call:from_reporter Return return:yes" - }, - { - "library": "tensorflow", - "name": "RepresentativeDatasetLoader", - "source_code": "class RepresentativeDatasetLoader: def load(self) -> RepresentativeDatasetMapping: raise NotImplementedError('Method \"load\" is not implemented.')", - "docstring": "Representative dataset loader. 
Exposes the method that loads the representative dataset from files.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py", - "ast_data": "ClassDef name:RepresentativeDatasetLoader FunctionDef name:load arguments arg:self Raise raises:NotImplementedError('Method \"load\" is not implemented.')" - }, - { - "library": "pytorch", - "name": "get_real_value", - "source_code": "def get_real_value(node, tracer): from .exc import TorchRuntimeError cache = tracer.real_value_cache if node in cache: return cache[node] op = node.op args, kwargs = torch.fx.node.map_arg((node.args, node.kwargs), lambda n: get_real_value(n, tracer)) if op = = 'placeholder' and 'grapharg' in node.meta: return node.meta['grapharg'].example if op = = 'call_module': nn_module = tracer.output_graph.nn_modules[node.target] if not is_lazy_module(nn_module): nn_module = copy.deepcopy(nn_module) else: nn_module(*args, **kwargs) else: nn_module = None try: real_value = run_node(tracer, node, args, kwargs, nn_module) cache[node] = real_value except RuntimeError as e: raise TorchRuntimeError(str(e)).with_traceback(e.__traceback__) from None return real_value", - "docstring": "Run the actual computation represented by and return the result. This will execute any dependent nodes in the graph as well.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\utils.py", - "ast_data": "FunctionDef name:get_real_value arguments arg:node arg:tracer Assign If Compare op:In Return return:yes Assign Assign Call call:map_arg If BoolOp Compare op:Eq Compare op:In Return return:yes If Compare op:Eq Assign If Assign Call call:deepcopy Assign Try Assign Call call:run_node Assign ExceptHandler Raise raises:TorchRuntimeError(str(e)).with_traceback(e.__traceback__) Return return:yes" - }, - { - "library": "tensorflow", - "name": "record_summaries_every_n_global_steps", - "source_code": "def record_summaries_every_n_global_steps(n, global_step = None): if global_step is None: global_step = training_util.get_or_create_global_step() with ops.device('cpu: 0'): should = lambda: math_ops.equal(global_step % n, 0) if not context.executing_eagerly(): should = should() return record_if(should)", - "docstring": "Sets the should_record_summaries Tensor to true if global_step % n == 0.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", - "ast_data": "FunctionDef name:record_summaries_every_n_global_steps arguments arg:n arg:global_step If Compare op:Is Assign Call call:get_or_create_global_step With Assign If Assign Call call:should Return return:yes" - }, - { - "library": "pytorch", - "name": "opening_comment_lines", - "source_code": "@cached_property def opening_comment_lines(self) -> int: it = (i for i, s in enumerate(self.lines) if not s.startswith('#')) return next(it, 0)", - "docstring": "The number of comments at the very top of the file.", - "type": "method", - "file_path": "pytorch\\tools\\linter\\adapters\\_linter.py", - "ast_data": "FunctionDef name:opening_comment_lines arguments arg:self Assign Return return:yes" - }, - { - "library": "algorithms", - "name": "schedule", - "source_code": "def schedule(job): job = sorted(job, key = lambda j: j.finish) length = len(job) table = [0 for _ in range(length)] table[0] = job[0].profit for i in range(1, length): incl_prof = job[i].profit pos = binary_search(job, i) if pos ! 
= -1: incl_prof + = table[pos] table[i] = max(incl_prof, table[i - 1]) return table[length - 1]", - "docstring": "The main function that returns the maximum possible profit from given array of jobs", - "type": "function", - "file_path": "algorithms\\algorithms\\dp\\job_scheduling.py", - "ast_data": "FunctionDef name:schedule arguments arg:job Assign Call call:sorted Assign Call call:len Assign Assign For Call call:range Assign Assign Call call:binary_search If Compare op:NotEq Assign Call call:max Return return:yes" - }, - { - "library": "pandas", - "name": "to_pydatetime", - "source_code": "def to_pydatetime(self) -> npt.NDArray[np.object_]: return ints_to_pydatetime(self.asi8, tz = self.tz, reso = self._creso)", - "docstring": "Return an ndarray of `` objects. See Also -------- DatetimeIndex.to_julian_date : Converts Datetime Array to float64 ndarray of Julian Dates. Examples -------- >>> idx = pd.date_range(\"2018-02-27\", periods=3) >>> idx.to_pydatetime() array([datetime.datetime(2018, 2, 27, 0, 0), datetime.datetime(2018, 2, 28, 0, 0), datetime.datetime(2018, 3, 1, 0, 0)], dtype=object)", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\datetimes.py", - "ast_data": "FunctionDef name:to_pydatetime arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "start", - "source_code": "def start(self, timeout: int = 60, num_retries: int = 3, stderr: Union[int, TextIO, None] = None) -> None: curr_retries = 0 while True: try: data_dir = os.path.join(self._base_data_dir, str(curr_retries)) os.makedirs(data_dir, exist_ok = True) return self._start(data_dir, timeout, stderr) except Exception as e: curr_retries + = 1 stop_etcd(self._etcd_proc) logger.warning('Failed to start etcd server, got error: %s, retrying', str(e)) if curr_retries > = num_retries: shutil.rmtree(self._base_data_dir, ignore_errors = True) raise atexit.register(stop_etcd, self._etcd_proc, self._base_data_dir)", - "docstring": "Start the server, and waits for it to be ready. When this function returns the sever is ready to take requests. Args: timeout: time (in seconds) to wait for the server to be ready before giving up. num_retries: number of retries to start the server. Each retry will wait for max `subprocess.PIPEsubprocess.DEVNULLNone`. 
Raises: TimeoutError: if the server is not ready within the specified timeout", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_server.py", - "ast_data": "FunctionDef name:start arguments arg:self arg:timeout type:int arg:num_retries type:int arg:stderr type:Union[int, TextIO, None] Assign While Try Assign Call call:join Return return:yes ExceptHandler If Compare op:GtE Raise" - }, - { - "library": "coconut", - "name": "paren_change", - "source_code": "def paren_change(inputstr, opens = open_chars, closes = close_chars): count = 0 for c in inputstr: if c in opens: count - = 1 elif c in closes: count + = 1 return count", - "docstring": "Determine the parenthetical change of level (num closes - num opens).", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:paren_change arguments arg:inputstr arg:opens arg:closes Assign For If Compare op:In If Compare op:In Return return:yes" - }, - { - "library": "pytorch", - "name": "compile_package", - "source_code": "@indent_msg def compile_package(self, path: Path, top_package_path: Path): assert path.is_dir() if path.name in DENY_LIST: self.msg(path, 'X') return is_package_dir = any((child.name = = '__init__.py' for child in path.iterdir())) if not is_package_dir: self.msg(path, 'S') return self.msg(path, 'P') for child in path.iterdir(): self.compile_path(child, top_package_path)", - "docstring": "Compile all the files within a Python package dir.", - "type": "method", - "file_path": "pytorch\\torch\\utils\\_freeze.py", - "ast_data": "FunctionDef name:compile_package arguments arg:self arg:path type:Path arg:top_package_path type:Path If Compare op:In Return return:no Assign Call call:any If Return return:no For Call call:iterdir" - }, - { - "library": "matplotlib", - "name": "set_linestyle", - "source_code": "def set_linestyle(self, ls): if ls is None: ls = 'solid' if ls in [' ', '', 'none']: ls = 'None' self._linestyle = ls self._unscaled_dash_pattern = mlines._get_dash_pattern(ls) self._dash_pattern = mlines._scale_dashes(*self._unscaled_dash_pattern, self._linewidth) self.stale = True", - "docstring": "Set the patch linestyle. ======================================================= ================ linestyle description ======================================================= ================ `` is an even length tuple of on and off ink in points. Parameters ---------- ls : {'-', '--', '-.', ':', '', (offset, on-off-seq), ...} The line style.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_linestyle arguments arg:self arg:ls If Compare op:Is Assign If Compare op:In Assign Assign Assign Call call:_get_dash_pattern Assign Call call:_scale_dashes Assign" - }, - { - "library": "tensorflow", - "name": "global_variables_initializer", - "source_code": "@tf_export(v1 = ['initializers.global_variables', 'global_variables_initializer']) def global_variables_initializer(): if context.executing_eagerly(): return control_flow_ops.no_op(name = 'global_variables_initializer') return variables_initializer(global_variables())", - "docstring": "Returns an Op that initializes global variables. This is just a shortcut for @compatibility(TF2) In TF2, variables are initialized immediately when they are created. There is no longer a need to run variable initializers before using them. 
@end_compatibility Returns: An Op that initializes global variables in the graph.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\variables.py", - "ast_data": "FunctionDef name:global_variables_initializer arguments Call call:tf_export If Call call:executing_eagerly Return return:yes Return return:yes" - }, - { - "library": "salmon", - "name": "clear", - "source_code": "def clear(self): del self.attachments[:] del self.base.parts[:] self.multipart = False", - "docstring": "Clears out the attachments so you can redo them. Use this to keep the headers for a series of different messages with different attachments.", - "type": "method", - "file_path": "salmon\\salmon\\mail.py", - "ast_data": "FunctionDef name:clear arguments arg:self Assign" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, name: Text, num_replicas: int, pivot: ops.Operation): super(TPUReplicateContext, self).__init__() self._num_replicas = num_replicas self._outer_device_function_stack = None self._oc_dev_fn_stack = None self._outside_compilation_cluster = None self._is_map_outside_compilation = False self._outside_compilation_v2_context = None self._outside_compilation_counter = 0 self._in_gradient_colocation = None self._gradient_colocation_stack = [] self._host_compute_core = [] self._name = name self._tpu_replicate_attr = attr_value_pb2.AttrValue(s = compat.as_bytes(self._name)) self._unsupported_ops = [] self._pivot = pivot self._replicated_vars = {}", - "docstring": "Builds a new TPUReplicateContext. Args: name: a unique name for the context, used to populate the attribute. num_replicas: an integer that gives the number of replicas for the computation. pivot: a pivot node. Nodes in the TPUReplicateContext that do not have any inputs will have a control dependency on the pivot node. This ensures that nodes are correctly included in any enclosing control flow contexts.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:name type:Text arg:num_replicas type:int arg:pivot type:ops.Operation Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Call call:AttrValue Assign Assign Assign" - }, - { - "library": "mongo", - "name": "QueryType", - "source_code": "class QueryType(str, enum.Enum): EQUALITY = 'equality' 'Used to encrypt a value for an equality query.' RANGE = 'range' 'Used to encrypt a value for a range query.\\n\\n .. versionadded: : 4.9\\n ' RANGEPREVIEW = 'RangePreview' '**DEPRECATED** - Used to encrypt a value for a rangePreview query.\\n\\n .. note: : Support for RangePreview is deprecated. Use: attr: `QueryType.RANGE` instead.\\n\\n .. versionadded: : 4.4\\n '", - "docstring": "An enum that defines the supported values for explicit encryption query_type. .. versionadded:: 4.2", - "type": "class", - "file_path": "mongo\\pymongo\\asynchronous\\encryption.py", - "ast_data": "ClassDef name:QueryType Assign Assign Assign" - }, - { - "library": "scipy", - "name": "accept_test", - "source_code": "def accept_test(self, x_new = None, *args, **kwargs): if not hasattr(self.function, 'xmin'): return True if np.any(x_new < self.function.xmin): return False if np.any(x_new > self.function.xmax): return False return True", - "docstring": "Does the new candidate vector lie in between the bounds? 
Returns ------- accept_test : bool The candidate vector lies in between the bounds", - "type": "method", - "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py", - "ast_data": "FunctionDef name:accept_test arguments arg:self arg:x_new vararg:args kwarg:kwargs If Return return:yes If Call call:any Return return:yes If Call call:any Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "score_size", - "source_code": "def score_size(self, size1, size2): if size2 = = 'scalable': return 0.0 try: sizeval1 = float(size1) except ValueError: sizeval1 = self.default_size * font_scalings[size1] try: sizeval2 = float(size2) except ValueError: return 1.0 return abs(sizeval1 - sizeval2) / 72", - "docstring": "Return a match score between *size1* and *size2*. If *size2* (the size specified in the font file) is 'scalable', this function always returns 0.0, since any font size can be generated. Otherwise, the result is the absolute distance between *size1* and *size2*, normalized so that the usual range of font sizes (6pt - 72pt) will lie between 0.0 and 1.0.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", - "ast_data": "FunctionDef name:score_size arguments arg:self arg:size1 arg:size2 If Compare op:Eq Return return:yes Try Assign Call call:float ExceptHandler Assign Try Assign Call call:float ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "feincms", - "name": "prefetch_modeladmin_get_queryset", - "source_code": "def prefetch_modeladmin_get_queryset(modeladmin, *lookups): def do_wrap(f): @wraps(f) def wrapper(request, *args, **kwargs): qs = f(request, *args, **kwargs) qs = qs.prefetch_related(*lookups) return qs return wrapper modeladmin.get_queryset = do_wrap(modeladmin.get_queryset)", - "docstring": "Wraps default modeladmin `` to prefetch related lookups.", - "type": "function", - "file_path": "feincms\\feincms\\extensions\\base.py", - "ast_data": "FunctionDef name:prefetch_modeladmin_get_queryset arguments arg:modeladmin vararg:lookups FunctionDef name:do_wrap arguments arg:f FunctionDef name:wrapper arguments arg:request vararg:args kwarg:kwargs Call call:wraps Assign Call call:f Assign Call call:prefetch_related Return return:yes Return return:yes Assign Call call:do_wrap" - }, - { - "library": "pytorch", - "name": "set_sharing_strategy", - "source_code": "def set_sharing_strategy(new_strategy): global _sharing_strategy assert new_strategy in _all_sharing_strategies _sharing_strategy = new_strategy", - "docstring": "Set the strategy for sharing CPU tensors. Args: new_strategy (str): Name of the selected strategy. 
Should be one of the values returned by :func:.", - "type": "function", - "file_path": "pytorch\\torch\\multiprocessing\\__init__.py", - "ast_data": "FunctionDef name:set_sharing_strategy arguments arg:new_strategy Assign" - }, - { - "library": "django", - "name": "length", - "source_code": "@property def length(self): return capi.geos_length(self.ptr, byref(c_double()))", - "docstring": "Return the length of this Geometry (e.g., 0 for point, or the circumference of a Polygon).", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:length arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "njoin", - "source_code": "def njoin(*path): paths = [] for p in path: if is_sequence(p): paths.append(njoin(*p)) else: assert is_string(p) paths.append(p) path = paths if not path: joined = '' else: joined = os.path.join(*path) if os.path.sep ! = '/': joined = joined.replace('/', os.path.sep) return minrelpath(joined)", - "docstring": "Join two or more pathname components + - convert a /-separated pathname to one using the OS's path separator. - resolve and from path. Either passing n arguments as in njoin('a','b'), or a sequence of n names as in njoin(['a','b']) is handled, or a mixture of such arguments.", - "type": "function", - "file_path": "numpy\\numpy\\distutils\\misc_util.py", - "ast_data": "FunctionDef name:njoin arguments vararg:path Assign For If Call call:is_sequence Assign If Assign Assign Call call:join If Compare op:NotEq Assign Call call:replace Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, widthA = 1.0, angleA = 0, widthB = 1.0, angleB = 0): super().__init__(widthA = widthA, lengthA = 0, angleA = angleA, widthB = widthB, lengthB = 0, angleB = angleB)", - "docstring": "Parameters ---------- widthA, widthB : float, default: 1.0 Width of the bracket. angleA, angleB : float, default: 0 degrees Orientation of the bracket, as a counterclockwise angle. 0 degrees means perpendicular to the line.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:widthA arg:angleA arg:widthB arg:angleB" - }, - { - "library": "django", - "name": "metadata", - "source_code": "@property def metadata(self): domain_list = ['DEFAULT'] meta_list = capi.get_ds_metadata_domain_list(self._ptr) if meta_list: counter = 0 domain = meta_list[counter] while domain: domain_list.append(domain.decode()) counter + = 1 domain = meta_list[counter] capi.free_dsl(meta_list) result = {} for domain in domain_list: data = capi.get_ds_metadata(self._ptr, None if domain = = 'DEFAULT' else domain.encode()) if not data: continue domain_meta = {} counter = 0 item = data[counter] while item: key, val = item.decode().split(' = ') domain_meta[key] = val counter + = 1 item = data[counter] result[domain or 'DEFAULT'] = domain_meta return result", - "docstring": "Return the metadata for this raster or band. 
The return value is a nested dictionary, where the first-level key is the metadata domain and the second-level is the metadata item names and values for that domain.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\base.py", - "ast_data": "FunctionDef name:metadata arguments arg:self Assign Assign Call call:get_ds_metadata_domain_list If Assign Assign While Assign Assign For Assign Call call:get_ds_metadata If Assign Assign Assign While Assign Call call:split Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "Callable", - "source_code": "class Callable(saveable_object.SaveSpec): def __init__(self, tensor_callable, dtype, device): super().__init__(tensor_callable, None, None, dtype, device)", - "docstring": "A callable that represents a Tensor that should be saved to checkpoint. This can be returned from in place of a Tensor. The callable will be executed on the specified device when the checkpoint is about to be written. Any class can use for checkpointing, but for SavedModel export, only resource-type variables* are supported. * must return True.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\tensor_callable.py", - "ast_data": "ClassDef name:Callable FunctionDef name:__init__ arguments arg:self arg:tensor_callable arg:dtype arg:device" - }, - { - "library": "scikit-learn", - "name": "default_dtypes", - "source_code": "def default_dtypes(self, *, device = None): return {'real floating': dtype(float64), 'complex floating': dtype(complex128), 'integral': dtype(intp), 'indexing': dtype(intp)}", - "docstring": "The default data types used for new CuPy arrays. For CuPy, this always returns the following dictionary: - **\"real floating\"**: `` Parameters ---------- device : str, optional The device to get the default data types for. Returns ------- dtypes : dict A dictionary describing the default data types used for new CuPy arrays. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.dtypes, __array_namespace_info__.devices Examples -------- >>> info = xp.__array_namespace_info__() >>> info.default_dtypes() {'real floating': cupy.float64, 'complex floating': cupy.complex128, 'integral': cupy.int64, 'indexing': cupy.int64}", - "type": "method", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_info.py", - "ast_data": "FunctionDef name:default_dtypes arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "post_pr_comment", - "source_code": "def post_pr_comment(org: str, project: str, pr_num: int, msg: str, dry_run: bool = False) -> list[dict[str, Any]]: internal_debugging = '' run_url = os.getenv('GH_RUN_URL') if run_url is not None: internal_debugging = '\\n'.join((line for line in ('
Details for Dev Infra team', f'Raised by workflow job\\n', '
') if line)) comment = '\\n'.join((f'### Cherry picking #{pr_num}', f'{msg}', '', f'{internal_debugging}')) return gh_post_pr_comment(org, project, pr_num, comment, dry_run)", - "docstring": "Post a comment on the PR itself to point to the cherry picking PR when success or print the error when failure", - "type": "function", - "file_path": "pytorch\\.github\\scripts\\cherry_pick.py", - "ast_data": "FunctionDef name:post_pr_comment arguments arg:org type:str arg:project type:str arg:pr_num type:int arg:msg type:str arg:dry_run type:bool Assign Assign Call call:getenv If Compare op:IsNot Assign Call call:join Assign Call call:join Return return:yes" - }, - { - "library": "kornia", - "name": "forward", - "source_code": "def forward(self, patch: torch.Tensor) -> torch.Tensor: KORNIA_CHECK_SHAPE(patch, ['B', '1', 'H', 'W']) self.weighting = self.weighting.to(patch.dtype).to(patch.device) grads: torch.Tensor = self.gradient(patch) * self.weighting gx: torch.Tensor = grads[:, :, 0] gy: torch.Tensor = grads[:, :, 1] ellipse_shape = torch.cat([gx.pow(2).mean(dim = 2).mean(dim = 2, keepdim = True), (gx * gy).mean(dim = 2).mean(dim = 2, keepdim = True), gy.pow(2).mean(dim = 2).mean(dim = 2, keepdim = True)], dim = 2) bad_mask = ((ellipse_shape < self.eps).float().sum(dim = 2, keepdim = True) > = 2).to(ellipse_shape.dtype) circular_shape = torch.tensor([1.0, 0.0, 1.0]).to(ellipse_shape.device).to(ellipse_shape.dtype).view(1, 1, 3) ellipse_shape = ellipse_shape * (1.0 - bad_mask) + circular_shape * bad_mask ellipse_shape = ellipse_shape / ellipse_shape.max(dim = 2, keepdim = True)[0] return ellipse_shape", - "docstring": "Run forward. Args: patch: :math: Returns: torch.Tensor: ellipse_shape :math:", - "type": "method", - "file_path": "kornia\\kornia\\feature\\affine_shape.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:patch type:torch.Tensor Assign Call call:to Assign Call call:cat Assign Call call:to Assign Call call:view Assign Assign Return return:yes" - }, - { - "library": "pygame", - "name": "time", - "source_code": "def time(): _check_init() return _pypm.Time()", - "docstring": "returns the current time in ms of the PortMidi timer pygame.midi.time(): return time The time is reset to 0, when the module is inited.", - "type": "function", - "file_path": "pygame\\src_py\\midi.py", - "ast_data": "FunctionDef name:time arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "store_timeout", - "source_code": "@contextmanager def store_timeout(store, timeout: float): old_timeout = store.timeout store.set_timeout(timedelta(seconds = timeout)) yield store.set_timeout(old_timeout)", - "docstring": "This sets the timeout and then restores the old timeout when the context manager exits. 
Args: store: the store to set the timeout on timeout: the timeout to set", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\elastic\\utils\\store.py", - "ast_data": "FunctionDef name:store_timeout arguments arg:store arg:timeout type:float Assign" - }, - { - "library": "pytorch", - "name": "update_obs_for_equalization", - "source_code": "def update_obs_for_equalization(model: GraphModule, modules: dict[str, nn.Module]) -> dict[str, _WeightEqualizationObserver]: weight_eq_obs_dict = {} for node in model.graph.nodes: if node.op = = 'call_module' and isinstance(modules[node.target], _InputEqualizationObserver): input_eq_obs = modules[node.target] assert isinstance(input_eq_obs, _InputEqualizationObserver) op_node, weight_eq_obs = get_op_node_and_weight_eq_obs(node, model, modules) if op_node is None or weight_eq_obs is None: continue if op_node.op = = 'call_module': if fused_module_supports_equalization(modules[str(op_node.target)]): module = modules[str(op_node.target)][0] assert nn_module_supports_equalization(module) weight_eq_obs(module.weight) else: weight_eq_obs(modules[str(op_node.target)].weight) equalization_scale = calculate_equalization_scale(input_eq_obs, weight_eq_obs) input_eq_obs.set_equalization_scale(equalization_scale) weight_eq_obs.set_equalization_scale(equalization_scale) weight_eq_obs_dict[op_node.name] = weight_eq_obs return weight_eq_obs_dict", - "docstring": "Update all of the observer's equalization scale. For each InputEqualizationObserver, we will find the location of the next WeightEqualizationObserver, create it, and calculate the equalization scale based on the two observers. We will then return a dictionary mapping operation node names to the corresponding WeightEqualizationObservers for that operation.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_equalize.py", - "ast_data": "FunctionDef name:update_obs_for_equalization arguments arg:model type:GraphModule arg:modules type:dict[str, nn.Module] Assign For If BoolOp Compare op:Eq Call call:isinstance Assign Assign Call call:get_op_node_and_weight_eq_obs If BoolOp Compare op:Is Compare op:Is If Compare op:Eq If Call call:fused_module_supports_equalization Assign Assign Call call:calculate_equalization_scale Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "ldu", - "source_code": "def ldu(load_v, name): try: return load_v() except (KeyError, AttributeError, NameError): return Undefined(name)", - "docstring": "Load variable operator that returns Undefined when failing to evaluate. Note: the name (\"load or return undefined\") is abbreviated to minimize the amount of clutter in generated code. This variant of is useful when loading symbols that may be undefined at runtime, such as composite symbols, and whether they are defined or not cannot be determined statically. For example is undefined when is an empty dict. Args: load_v: Lambda that executes the actual read. name: Human-readable name of the symbol being read. Returns: Either the value of the symbol, or Undefined, if the symbol is not fully defined.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\variables.py", - "ast_data": "FunctionDef name:ldu arguments arg:load_v arg:name Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "tensorflow", - "name": "getdoc", - "source_code": "def getdoc(object): return _inspect.getdoc(object)", - "docstring": "TFDecorator-aware replacement for inspect.getdoc. 
Args: object: An object, possibly decorated. Returns: The docstring associated with the object. The outermost-decorated object is intended to have the most complete documentation, so the decorated parameter is not unwrapped.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", - "ast_data": "FunctionDef name:getdoc arguments arg:object Return return:yes" - }, - { - "library": "django", - "name": "DetailView", - "source_code": "class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView): pass", - "docstring": "Render a \"detail\" view of an object. By default this is a model instance looked up from , but the view will support display of *any* object by overriding .", - "type": "class", - "file_path": "django\\django\\views\\generic\\detail.py", - "ast_data": "ClassDef name:DetailView" - }, - { - "library": "pytorch", - "name": "forward", - "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor: weight_quant_dequant = self.get_weight() result = F.linear(x, weight_quant_dequant, self.bias) return result", - "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.linear --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.linear --- *quant - dequant and the backend should be able to fuse the ops with into a quantized linear", - "type": "method", - "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\linear.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:x type:torch.Tensor Assign Call call:get_weight Assign Call call:linear Return return:yes" - }, - { - "library": "mongo", - "name": "upload_from_stream", - "source_code": "@_csot.apply def upload_from_stream(self, filename: str, source: Any, chunk_size_bytes: Optional[int] = None, metadata: Optional[Mapping[str, Any]] = None, session: Optional[ClientSession] = None) -> ObjectId: with self.open_upload_stream(filename, chunk_size_bytes, metadata, session = session) as gin: gin.write(source) return cast(ObjectId, gin._id)", - "docstring": "Uploads a user file to a GridFS bucket. Reads the contents of the user file from and uploads it to the file . Source can be a string or file-like object. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) file_id = fs.upload_from_stream( \"test_file\", \"data I want to store!\", chunk_size_bytes=4, metadata={\"contentType\": \"text/plain\"}) Returns the _id of the uploaded file. Raises :exc: if no such version of that file exists. Raises :exc: if is not a string. :param filename: The name of the file to upload. :param source: The source stream of the content to be uploaded. Must be a file-like object that implements :meth: or a string. 
:param chunk_size_bytesGridFSBucket~pymongo.client_session.ClientSession` parameter.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:upload_from_stream arguments arg:self arg:filename type:str arg:source type:Any arg:chunk_size_bytes type:Optional[int] arg:metadata type:Optional[Mapping[str, Any]] arg:session type:Optional[ClientSession] With Return return:yes" - }, - { - "library": "pytorch", - "name": "compute_local_stride", - "source_code": "def compute_local_stride(global_stride: ShapeType, mesh: DeviceMesh, placements: Sequence[Placement]) -> tuple[int, ...]: stride_divisors = [1] * len(global_stride) for mesh_idx, p in enumerate(placements): if p.is_shard(): i = cast(Shard, p).dim for j in range(len(global_stride)): if global_stride[j] > global_stride[i]: stride_divisors[j] * = mesh.size(mesh_idx) return tuple((global_stride[i] // stride_divisors[i] for i in range(len(global_stride))))", - "docstring": "Compute the stride of a local tensor shard, given the global stride of the DTensor. NOTE: Currently this function is assuming the DTensor is evenly shardable.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py", - "ast_data": "FunctionDef name:compute_local_stride arguments arg:global_stride type:ShapeType arg:mesh type:DeviceMesh arg:placements type:Sequence[Placement] Assign For Call call:enumerate If Call call:is_shard Assign For Call call:range If Compare op:Gt Return return:yes" - }, - { - "library": "pandas", - "name": "get_level_lengths", - "source_code": "def get_level_lengths(levels: Any, sentinel: bool | object | str = '') -> list[dict[int, int]]: if len(levels) = = 0: return [] control = [True] * len(levels[0]) result = [] for level in levels: last_index = 0 lengths = {} for i, key in enumerate(level): if control[i] and key = = sentinel: pass else: control[i] = False lengths[last_index] = i - last_index last_index = i lengths[last_index] = len(level) - last_index result.append(lengths) return result", - "docstring": "For each index in each level the function returns lengths of indexes. Parameters ---------- levels : list of lists List of values on for level. sentinel : string, optional Value which states that no new index starts on there. Returns ------- Returns list of maps. For each level returns map of indexes (key is index in row and value is length of index).", - "type": "function", - "file_path": "pandas\\pandas\\io\\formats\\format.py", - "ast_data": "FunctionDef name:get_level_lengths arguments arg:levels type:Any arg:sentinel type:bool | object | str If Compare op:Eq Return return:yes Assign Assign For Assign Assign For Call call:enumerate If BoolOp Compare op:Eq Assign Assign Assign Assign Return return:yes" - }, - { - "library": "seaborn", - "name": "set_palette", - "source_code": "def set_palette(palette, n_colors = None, desat = None, color_codes = False): colors = palettes.color_palette(palette, n_colors, desat) cyl = cycler('color', colors) mpl.rcParams['axes.prop_cycle'] = cyl if color_codes: try: palettes.set_color_codes(palette) except (ValueError, TypeError): pass", - "docstring": "Set the matplotlib color cycle using a seaborn palette. Parameters ---------- palette : seaborn color palette | matplotlib colormap | hls | husl Palette definition. Should be something :func: can process. n_colors : int Number of colors in the cycle. The default number of colors will depend on the format of `color_palette` statement. 
set_context : set parameters to scale plot elements set_style : set the default parameters for figure style", - "type": "function", - "file_path": "seaborn\\seaborn\\rcmod.py", - "ast_data": "FunctionDef name:set_palette arguments arg:palette arg:n_colors arg:desat arg:color_codes Assign Call call:color_palette Assign Call call:cycler Assign If Try ExceptHandler" - }, - { - "library": "django", - "name": "get_filter_kwargs_for_object", - "source_code": "def get_filter_kwargs_for_object(self, obj): return {self.name: getattr(obj, self.attname)}", - "docstring": "Return a dict that when passed as kwargs to self.model.filter(), would yield all instances having the same value for this field as obj has.", - "type": "method", - "file_path": "django\\django\\db\\models\\fields\\__init__.py", - "ast_data": "FunctionDef name:get_filter_kwargs_for_object arguments arg:self arg:obj Return return:yes" - }, - { - "library": "numpy", - "name": "iscode", - "source_code": "def iscode(object): return isinstance(object, types.CodeType)", - "docstring": "Return true if the object is a code object. Code objects provide these attributes: co_argcount number of arguments (not including * or ** args) co_code string of raw compiled bytecode co_consts tuple of constants used in the bytecode co_filename name of file in which this code object was created co_firstlineno number of first line in Python source code co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg co_lnotab encoded mapping of line numbers to bytecode indices co_name name with which this code object was defined co_names tuple of names of local variables co_nlocals number of local variables co_stacksize virtual machine stack space required co_varnames tuple of names of arguments and local variables", - "type": "function", - "file_path": "numpy\\numpy\\_utils\\_inspect.py", - "ast_data": "FunctionDef name:iscode arguments arg:object Return return:yes" - }, - { - "library": "pytorch", - "name": "from_choice_args", - "source_code": "@classmethod def from_choice_args(cls, example_inputs: list[torch.Tensor], example_inputs_extern: list[torch.Tensor], out: torch.Tensor, out_extern: torch.Tensor, expected: Optional[torch.Tensor] = None) -> Self: return cls(triton = BenchmarkTensors(example_inputs, out), extern = BenchmarkTensors(example_inputs_extern, out_extern), expected = expected)", - "docstring": "Factory method to create AutotuneInputs from separate inputs/outputs", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", - "ast_data": "FunctionDef name:from_choice_args arguments arg:cls arg:example_inputs type:list[torch.Tensor] arg:example_inputs_extern type:list[torch.Tensor] arg:out type:torch.Tensor arg:out_extern type:torch.Tensor arg:expected type:Optional[torch.Tensor] Return return:yes" - }, - { - "library": "pandas", - "name": "itertuples", - "source_code": "def itertuples(self, index: bool = True, name: str | None = 'Pandas') -> Iterable[tuple[Any, ...]]: arrays = [] fields = list(self.columns) if index: arrays.append(self.index) fields.insert(0, 'Index') arrays.extend((self.iloc[:, k] for k in range(len(self.columns)))) if name is not None: itertuple = collections.namedtuple(name, fields, rename = True) return map(itertuple._make, zip(*arrays)) return zip(*arrays)", - "docstring": "Iterate over DataFrame rows as namedtuples. Parameters ---------- index : bool, default True If True, return the index as the first element of the tuple. 
name : str or None, default \"Pandas\" The name of the returned namedtuples or None to return regular tuples. Returns ------- iterator An object to iterate over namedtuples for each row in the DataFrame with the first field possibly being the index and following fields being the column values. See Also -------- DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. DataFrame.items : Iterate over (column name, Series) pairs. Notes ----- The column names will be renamed to positional names if they are invalid Python identifiers, repeated, or start with an underscore. Examples -------- >>> df = pd.DataFrame( ... {\"num_legs\": [4, 2], \"num_wings\": [0, 2]}, index=[\"dog\", \"hawk\"] ... ) >>> df num_legs num_wings dog 4 0 hawk 2 2 >>> for row in df.itertuples(): ... print(row) Pandas(Index='dog', num_legs=4, num_wings=0) Pandas(Index='hawk', num_legs=2, num_wings=2) By setting the parameter to False we can remove the index as the first element of the tuple: >>> for row in df.itertuples(index=False): ... print(row) Pandas(num_legs=4, num_wings=0) Pandas(num_legs=2, num_wings=2) With the parameter set we set a custom name for the yielded namedtuples: >>> for row in df.itertuples(name=\"Animal\"): ... print(row) Animal(Index='dog', num_legs=4, num_wings=0) Animal(Index='hawk', num_legs=2, num_wings=2)", - "type": "method", - "file_path": "pandas\\pandas\\core\\frame.py", - "ast_data": "FunctionDef name:itertuples arguments arg:self arg:index type:bool arg:name type:str | None Assign Assign Call call:list If If Compare op:IsNot Assign Call call:namedtuple Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "item_extra_kwargs", - "source_code": "def item_extra_kwargs(self, item): return {}", - "docstring": "Return an extra keyword arguments dictionary that is used with the call of the feed generator.", - "type": "method", - "file_path": "django\\django\\contrib\\syndication\\views.py", - "ast_data": "FunctionDef name:item_extra_kwargs arguments arg:self arg:item Return return:yes" - }, - { - "library": "tensorflow", - "name": "add_tensor_filter", - "source_code": "def add_tensor_filter(self, filter_name, tensor_filter): self._tensor_filters[filter_name] = tensor_filter", - "docstring": "Add a tensor filter. Args: filter_name: () name of the filter. tensor_filter: () the filter callable. See the doc string of for more details about its signature.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py", - "ast_data": "FunctionDef name:add_tensor_filter arguments arg:self arg:filter_name arg:tensor_filter Assign" - }, - { - "library": "pytorch", - "name": "wait_tensor", - "source_code": "def wait_tensor(tensor): return torch.ops._c10d_functional.wait_tensor(tensor)", - "docstring": "Wait on a tensor returned by the collectives ops. Waiting follows device semantics, which means blocking on CPU and synchronizing streams on CUDA.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", - "ast_data": "FunctionDef name:wait_tensor arguments arg:tensor Return return:yes" - }, - { - "library": "numpy", - "name": "iterable", - "source_code": "@set_module('numpy') def iterable(y): try: iter(y) except TypeError: return False return True", - "docstring": "Check whether or not an object can be iterated over. Parameters ---------- y : object Input object. Returns ------- b : bool Return ``. 
One notable exception is the treatment of 0-dimensional arrays:: >>> from collections.abc import Iterable >>> a = np.array(1.0) # 0-dimensional numpy array >>> isinstance(a, Iterable) True >>> np.iterable(a) False", - "type": "function", - "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", - "ast_data": "FunctionDef name:iterable arguments arg:y Call call:set_module Try ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "ElasticAgent", - "source_code": "class ElasticAgent(abc.ABC): @abc.abstractmethod def run(self, role: str = DEFAULT_ROLE) -> RunResult: raise NotImplementedError @abc.abstractmethod def get_worker_group(self, role: str = DEFAULT_ROLE) -> WorkerGroup: raise NotImplementedError", - "docstring": "An agent process responsible for managing one or more worker processes. The worker processes are assumed to be regular distributed PyTorch scripts. When the worker process is created by the agent, the agent provides the necessary information for the worker processes to properly initialize a torch process group. The exact deployment topology and ratio of agent-to-worker is dependent on the specific implementation of the agent and the user's job placement preferences. For instance, to run a distributed training job on GPU with 8 trainers (one per GPU) one can: 1. Use 8 x single GPU instances, place an agent per instance, managing 1 worker per agent. 2. Use 4 x double GPU instances, place an agent per instance, managing 2 workers per agent. 3. Use 2 x quad GPU instances, place an agent per instance, managing 4 workers per agent. 4. Use 1 x 8 GPU instance, place an agent per instance, managing 8 workers per agent. Usage :: group_result = agent.run() if group_result.is_failed(): # workers failed failure = group_result.failures[0] logger.exception(\"worker 0 failed with exit code : %s\", failure.exit_code) else: return group_result.return_values[0] # return rank 0's results", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py", - "ast_data": "ClassDef name:ElasticAgent FunctionDef name:run arguments arg:self arg:role type:str Raise raises:NotImplementedError FunctionDef name:get_worker_group arguments arg:self arg:role type:str Raise raises:NotImplementedError" - }, - { - "library": "pandas", - "name": "from_frame", - "source_code": "@classmethod def from_frame(cls, df: DataFrame, sortorder: int | None = None, names: Sequence[Hashable] | Hashable | None = None) -> MultiIndex: if not isinstance(df, ABCDataFrame): raise TypeError('Input must be a DataFrame') column_names, columns = zip(*df.items()) names = column_names if names is None else names return cls.from_arrays(columns, sortorder = sortorder, names = names)", - "docstring": "Make a MultiIndex from a DataFrame. Parameters ---------- df : DataFrame DataFrame to be converted to MultiIndex. sortorder : int, optional Level of sortedness (must be lexicographically sorted by that level). names : list-like, optional If no names are provided, use the column names, or tuple of column names if the columns is a MultiIndex. If a sequence, overwrite names with the given sequence. Returns ------- MultiIndex The MultiIndex representation of the given DataFrame. See Also -------- MultiIndex.from_arrays : Convert list of arrays to MultiIndex. MultiIndex.from_tuples : Convert list of tuples to MultiIndex. MultiIndex.from_product : Make a MultiIndex from cartesian product of iterables. Examples -------- >>> df = pd.DataFrame( ... 
[[\"HI\", \"Temp\"], [\"HI\", \"Precip\"], [\"NJ\", \"Temp\"], [\"NJ\", \"Precip\"]], ... columns=[\"a\", \"b\"], ... ) >>> df a b 0 HI Temp 1 HI Precip 2 NJ Temp 3 NJ Precip >>> pd.MultiIndex.from_frame(df) MultiIndex([('HI', 'Temp'), ('HI', 'Precip'), ('NJ', 'Temp'), ('NJ', 'Precip')], names=['a', 'b']) Using explicit names, instead of the column names >>> pd.MultiIndex.from_frame(df, names=[\"state\", \"observation\"]) MultiIndex([('HI', 'Temp'), ('HI', 'Precip'), ('NJ', 'Temp'), ('NJ', 'Precip')], names=['state', 'observation'])", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\multi.py", - "ast_data": "FunctionDef name:from_frame arguments arg:cls arg:df type:DataFrame arg:sortorder type:int | None arg:names type:Sequence[Hashable] | Hashable | None If Raise raises:TypeError('Input must be a DataFrame') Assign Call call:zip Assign Return return:yes" - }, - { - "library": "numpy", - "name": "markinnerspaces", - "source_code": "def markinnerspaces(line): fragment = '' inside = False current_quote = None escaped = '' for c in line: if escaped = = '\\\\' and c in ['\\\\', \"'\", '\"']: fragment + = c escaped = c continue if not inside and c in [\"'\", '\"']: current_quote = c if c = = current_quote: inside = not inside elif c = = ' ' and inside: fragment + = '@_@' continue fragment + = c escaped = c return fragment", - "docstring": "The function replace all spaces in the input variable line which are surrounded with quotation marks, with the triplet \"@_@\". For instance, for the input \"a 'b c'\" the function returns \"a 'b@_@c'\" Parameters ---------- line : str Returns ------- str", - "type": "function", - "file_path": "numpy\\numpy\\f2py\\crackfortran.py", - "ast_data": "FunctionDef name:markinnerspaces arguments arg:line Assign Assign Assign Assign For If BoolOp Compare op:Eq Compare op:In Assign If BoolOp Compare op:In Assign If Compare op:Eq Assign If BoolOp Compare op:Eq Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "apply", - "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None = None) -> Any: return [torch.view_as_real(output.resolve_conj()) if isinstance(output, torch.Tensor) and torch.is_complex(output) else output for output in model_outputs]", - "docstring": "Convert float tensors to complex tensors. Args: model_output: The model output. model: The PyTorch model. Returns: A tuple of the model output.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", - "ast_data": "FunctionDef name:apply arguments arg:self arg:model_outputs type:Any arg:model type:torch.nn.Module | Callable | torch_export.ExportedProgram | None Return return:yes" - }, - { - "library": "tensorflow", - "name": "transform", - "source_code": "def transform(self, obj, user_context): if inspect.isfunction(obj) or inspect.ismethod(obj): return self.transform_function(obj, user_context) raise NotImplementedError('Non-function: {}'.format(type(obj)))", - "docstring": "Transforms a Python object. Users typically call this method. Args: obj: A Python object, function, type, etc. user_context: An opaque object (may be None) that is forwarded to transform_ast, through the ctx.user attribute. Returns: The result of calling transform_function. 
Raises: NotImplementedError: if the type of obj is not handled.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\transpiler.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:obj arg:user_context If BoolOp Call call:isfunction Call call:ismethod Return return:yes Raise raises:NotImplementedError('Non-function: {}'.format(type(obj)))" - }, - { - "library": "scipy", - "name": "Langermann", - "source_code": "class Langermann(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([0.0] * self.N, [10.0] * self.N)) self.global_optimum = [[2.00299219, 1.006096]] self.fglob = -5.1621259 def fun(self, x, *args): self.nfev + = 1 a = [3, 5, 2, 1, 7] b = [5, 2, 1, 4, 9] c = [1, 2, 5, 2, 3] return -sum(c * exp(-(1 / pi) * ((x[0] - a) ** 2 + (x[1] - b) ** 2)) * cos(pi * ((x[0] - a) ** 2 + (x[1] - b) ** 2)))", - "docstring": "Langermann objective function. This class defines the Langermann [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Langermann}}(x) = - \\sum_{i=1}^{5} \\frac{c_i \\cos\\left\\{\\pi \\left[\\left(x_{1}- a_i\\right)^{2} + \\left(x_{2} - b_i \\right)^{2}\\right]\\right\\}}{e^{\\frac{\\left( x_{1} - a_i\\right)^{2} + \\left( x_{2} - b_i\\right)^{2}}{\\pi}}} Where: .. math:: \\begin{matrix} a = [3, 5, 2, 1, 7]\\\\ b = [5, 2, 1, 4, 9]\\\\ c = [1, 2, 5, 2, 3] \\\\ \\end{matrix} Here :math: for :math:. *Global optimum*: :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015 TODO: Langermann from Gavana is _not the same_ as Jamil #68.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_L.py", - "ast_data": "ClassDef name:Langermann FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "total_run_calls", - "source_code": "@property @deprecated(None, 'Track steps using a tf.Variable saved in checkpoint instead.') @doc_controls.do_not_generate_docs def total_run_calls(self): if self._platform_device = = failure_handling_util.PlatformDevice.INTERNAL_TPU: raise NotImplementedError('Please create variables saved in checkpoint to keep track of steps and epochs.') return self._run_counter", - "docstring": "Returns the number of times is called. DEPRECATED: user should track total steps themselves, as this API provides little expressivity gain but could easily be misused and incurs extra synchronization cost for TPUStrategy users. This value tracks the number of all calls to including those before the program is restarted and the training is restored, by saving and reading the value in the checkpoint. A user can compute their total number of iterations by , while should be one for users. 
They can also use this value to infer the starting epoch and step after training restores, as shown in the example above.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\failure_handling\\failure_handling.py", - "ast_data": "FunctionDef name:total_run_calls arguments arg:self Call call:deprecated If Compare op:Eq Raise raises:NotImplementedError('Please create variables saved in checkpoint to keep track of steps and epochs.') Return return:yes" - }, - { - "library": "scipy", - "name": "present", - "source_code": "def present(x): return x is not None", - "docstring": "This is a Python equivalent of the Fortran 'present' function for optional arguments.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\present.py", - "ast_data": "FunctionDef name:present arguments arg:x Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, X, y = None): if self.fit_inverse_transform and self.kernel = = 'precomputed': raise ValueError('Cannot fit_inverse_transform with a precomputed kernel.') X = validate_data(self, X, accept_sparse = 'csr', copy = self.copy_X) self.gamma_ = 1 / X.shape[1] if self.gamma is None else self.gamma self._centerer = KernelCenterer().set_output(transform = 'default') K = self._get_kernel(X) self._fit_transform_in_place(K) if self.fit_inverse_transform: X_transformed = self.eigenvectors_ * np.sqrt(self.eigenvalues_) self._fit_inverse_transform(X_transformed, X) self.X_fit_ = X return self", - "docstring": "Fit the model from data in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_kernel_pca.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Call call:_fit_context If BoolOp Compare op:Eq Raise raises:ValueError('Cannot fit_inverse_transform with a precomputed kernel.') Assign Call call:validate_data Assign Assign Call call:set_output Assign Call call:_get_kernel If Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "enable_run_metadata", - "source_code": "def enable_run_metadata(self): self.ensure_initialized() pywrap_tfe.TFE_ContextEnableRunMetadata(self._handle)", - "docstring": "Enables tracing of op execution via RunMetadata. 
To retrieve the accumulated metadata call context.export_run_metadata() and to stop tracing call context.disable_run_metadata().", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:enable_run_metadata arguments arg:self" - }, - { - "library": "django", - "name": "LayerOptionAction", - "source_code": "class LayerOptionAction(argparse.Action): def __call__(self, parser, namespace, value, option_string = None): try: setattr(namespace, self.dest, int(value)) except ValueError: setattr(namespace, self.dest, value)", - "docstring": "Custom argparse action for the keyword option which may be an integer or a string.", - "type": "class", - "file_path": "django\\django\\contrib\\gis\\management\\commands\\ogrinspect.py", - "ast_data": "ClassDef name:LayerOptionAction FunctionDef name:__call__ arguments arg:self arg:parser arg:namespace arg:value arg:option_string Try ExceptHandler" - }, - { - "library": "pytorch", - "name": "register_backward_hook", - "source_code": "def register_backward_hook(self, hook: Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]]) -> RemovableHandle: if self._is_full_backward_hook is True: raise RuntimeError('Cannot use both regular backward hooks and full backward hooks on a single Module. Please use only one of them.') self._is_full_backward_hook = False handle = RemovableHandle(self._backward_hooks) self._backward_hooks[handle.id] = hook return handle", - "docstring": "Register a backward hook on the module. This function is deprecated in favor of :meth: and the behavior of this function will change in future versions. Returns: :class:: a handle that can be used to remove the added hook by calling ``", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:register_backward_hook arguments arg:self arg:hook type:Callable[['Module', _grad_t, _grad_t], Union[None, _grad_t]] If Compare op:Is Raise raises:RuntimeError('Cannot use both regular backward hooks and full backward hooks on a single Module. Please use only one of them.') Assign Assign Call call:RemovableHandle Assign Return return:yes" - }, - { - "library": "numpy", - "name": "polymul", - "source_code": "def polymul(c1, c2): [c1, c2] = pu.as_series([c1, c2]) ret = np.convolve(c1, c2) return pu.trimseq(ret)", - "docstring": "Multiply one polynomial by another. Returns the product of two polynomials * . The arguments are sequences of coefficients, from lowest order term to highest, e.g., [1,2,3] represents the polynomial `` Parameters ---------- c1, c2 : array_like 1-D arrays of coefficients representing a polynomial, relative to the \"standard\" basis, and ordered from lowest order term to highest. Returns ------- out : ndarray Of the coefficients of their product. 
See Also -------- polyadd, polysub, polymulx, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1, 2, 3) >>> c2 = (3, 2, 1) >>> P.polymul(c1, c2) array([ 3., 8., 14., 8., 3.])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\polynomial.py", - "ast_data": "FunctionDef name:polymul arguments arg:c1 arg:c2 Assign Call call:as_series Assign Call call:convolve Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_axisbelow", - "source_code": "def set_axisbelow(self, b): self._axisbelow = axisbelow = validate_axisbelow(b) zorder = {True: 0.5, 'line': 1.5, False: 2.5}[axisbelow] for axis in self._axis_map.values(): axis.set_zorder(zorder) self.stale = True", - "docstring": "Set whether axis ticks and gridlines are above or below most artists. This controls the zorder of the ticks and gridlines. For more information on the zorder see :doc:. Parameters ---------- b : bool or 'line' Possible values: - *True* (zorder = 0.5): Ticks and gridlines are below patches and lines, though still above images. - 'line' (zorder = 1.5): Ticks and gridlines are above patches (e.g. rectangles, with default zorder = 1) but still below lines and markers (with their default zorder = 2). - *False* (zorder = 2.5): Ticks and gridlines are above patches and lines / markers. Notes ----- For more control, call the method of each axis. See Also -------- get_axisbelow", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:set_axisbelow arguments arg:self arg:b Assign Call call:validate_axisbelow Assign For Call call:values Assign" - }, - { - "library": "scikit-learn", - "name": "predict_log_proba", - "source_code": "def predict_log_proba(self, X): proba = self.predict_proba(X) if self.n_outputs_ = = 1: return np.log(proba) else: for k in range(self.n_outputs_): proba[k] = np.log(proba[k]) return proba", - "docstring": "Predict class log-probabilities of the input samples X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to `classes_`.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", - "ast_data": "FunctionDef name:predict_log_proba arguments arg:self arg:X Assign Call call:predict_proba If Compare op:Eq Return return:yes For Call call:range Assign Call call:log Return return:yes" - }, - { - "library": "tensorflow", - "name": "exact_gaussian_kernel", - "source_code": "def exact_gaussian_kernel(x, y, stddev): x_aligned, y_aligned = _align_matrices(x, y) diff_squared_l2_norm = math_ops.reduce_sum(math_ops.squared_difference(x_aligned, y_aligned), 2) return math_ops.exp(-diff_squared_l2_norm / (2 * stddev * stddev))", - "docstring": "Computes exact Gaussian kernel value(s) for tensors x and y and stddev. The Gaussian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v||^2 / (2* stddev^2)) where the norm is the l2-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. 
Returns: A single value (scalar) with shape (1, 1) (if x, y are vectors) or a matrix of shape (m, n) with entries K(u, v) (where K is the Gaussian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\kernelized_utils.py", - "ast_data": "FunctionDef name:exact_gaussian_kernel arguments arg:x arg:y arg:stddev Assign Call call:_align_matrices Assign Call call:reduce_sum Return return:yes" - }, - { - "library": "pytorch", - "name": "LogisticNormal", - "source_code": "class LogisticNormal(TransformedDistribution): arg_constraints = {'loc': constraints.real, 'scale': constraints.positive} support = constraints.simplex has_rsample = True base_dist: Independent[Normal] def __init__(self, loc: Union[Tensor, float], scale: Union[Tensor, float], validate_args: Optional[bool] = None) -> None: base_dist = Normal(loc, scale, validate_args = validate_args) if not base_dist.batch_shape: base_dist = base_dist.expand([1]) super().__init__(base_dist, StickBreakingTransform(), validate_args = validate_args) def expand(self, batch_shape, _instance = None): new = self._get_checked_instance(LogisticNormal, _instance) return super().expand(batch_shape, _instance = new) @property def loc(self) -> Tensor: return self.base_dist.base_dist.loc @property def scale(self) -> Tensor: return self.base_dist.base_dist.scale", - "docstring": "Creates a logistic-normal distribution parameterized by :attr: and :attr: that define the base distribution transformed with the such that:: X ~ LogisticNormal(loc, scale) Y = log(X / (1 - X.cumsum(-1)))[..., :-1] ~ Normal(loc, scale) Args: loc (float or Tensor): mean of the base distribution scale (float or Tensor): standard deviation of the base distribution Example:: >>> # logistic-normal distributed with mean=(0, 0, 0) and stddev=(1, 1, 1) >>> # of the base Normal distribution >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = LogisticNormal(torch.tensor([0.0] * 3), torch.tensor([1.0] * 3)) >>> m.sample() tensor([ 0.7653, 0.0341, 0.0579, 0.1427])", - "type": "class", - "file_path": "pytorch\\torch\\distributions\\logistic_normal.py", - "ast_data": "ClassDef name:LogisticNormal Assign Assign Assign FunctionDef name:__init__ arguments arg:self arg:loc type:Union[Tensor, float] arg:scale type:Union[Tensor, float] arg:validate_args type:Optional[bool] Assign Call call:Normal If Assign Call call:expand FunctionDef name:expand arguments arg:self arg:batch_shape arg:_instance Assign Call call:_get_checked_instance Return return:yes FunctionDef name:loc arguments arg:self Return return:yes FunctionDef name:scale arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "pop_header_name", - "source_code": "def pop_header_name(row: list[Hashable], index_col: int | Sequence[int]) -> tuple[Hashable | None, list[Hashable]]: if is_list_like(index_col): assert isinstance(index_col, Iterable) i = max(index_col) else: assert not isinstance(index_col, Iterable) i = index_col header_name = row[i] header_name = None if header_name = = '' else header_name return (header_name, row[: i] + [''] + row[i + 1:])", - "docstring": "Pop the header name for MultiIndex parsing. Parameters ---------- row : list The data row to parse for the header name. index_col : int, list The index columns for our data. Assumed to be non-null. Returns ------- header_name : str The extracted header name. 
trimmed_row : list The original data row with the header name removed.", - "type": "function", - "file_path": "pandas\\pandas\\io\\excel\\_util.py", - "ast_data": "FunctionDef name:pop_header_name arguments arg:row type:list[Hashable] arg:index_col type:int | Sequence[int] If Call call:is_list_like Assign Call call:max Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "init", - "source_code": "def init(): _lazy_init()", - "docstring": "Initialize PyTorch's CUDA state. You may need to call this explicitly if you are interacting with PyTorch via its C API, as Python bindings for CUDA functionality will not be available until this initialization takes place. Ordinary users should not need this, as all of PyTorch's CUDA methods automatically initialize CUDA state on-demand. Does nothing if the CUDA state is already initialized.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\__init__.py", - "ast_data": "FunctionDef name:init arguments" - }, - { - "library": "tensorflow", - "name": "TypeSpecBatchEncoder", - "source_code": "class TypeSpecBatchEncoder(object, metaclass = abc.ABCMeta): @abc.abstractmethod def batch(self, spec, batch_size): raise NotImplementedError(f'{type(self).__name__}.batch') @abc.abstractmethod def unbatch(self, spec): raise NotImplementedError(f'{type(self).__name__}.unbatch') @abc.abstractmethod def encode(self, spec, value, minimum_rank = 0): raise NotImplementedError(f'{type(self).__name__}.encode') @abc.abstractmethod def decode(self, spec, encoded_value): raise NotImplementedError(f'{type(self).__name__}.decode') @abc.abstractmethod def encoding_specs(self, spec): raise NotImplementedError(f'{type(self).__name__}.encoding_specs')", - "docstring": "Class used to encode and decode composite tensor values for batching. In order to be batched and unbatched by APIs such as and , composite tensors must be encoded using flat tensors that can themselves be batched or unbatched. s are responsible for implementing this encoding. If a composite tensor's shape is a prefix of the shape of all of its component tensors, then this encoding can usually be performed by just returning those component tensors as a list. 
But if the composite tensor has components whose shape has a more complex relationship to the shape of the composite tensor, then a custom may need to be implemented.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", - "ast_data": "ClassDef name:TypeSpecBatchEncoder FunctionDef name:batch arguments arg:self arg:spec arg:batch_size Raise raises:NotImplementedError(f'{type(self).__name__}.batch') FunctionDef name:unbatch arguments arg:self arg:spec Raise raises:NotImplementedError(f'{type(self).__name__}.unbatch') FunctionDef name:encode arguments arg:self arg:spec arg:value arg:minimum_rank Raise raises:NotImplementedError(f'{type(self).__name__}.encode') FunctionDef name:decode arguments arg:self arg:spec arg:encoded_value Raise raises:NotImplementedError(f'{type(self).__name__}.decode') FunctionDef name:encoding_specs arguments arg:self arg:spec Raise raises:NotImplementedError(f'{type(self).__name__}.encoding_specs')" - }, - { - "library": "algorithms", - "name": "interpolation_search", - "source_code": "def interpolation_search(array: List[int], search_key: int) -> int: high = len(array) - 1 low = 0 while low < = high and array[low] < = search_key < = array[high]: pos = low + int((search_key - array[low]) * (high - low) / (array[high] - array[low])) if array[pos] = = search_key: return pos if array[pos] < search_key: low = pos + 1 else: high = pos - 1 return -1", - "docstring": ":param array: The array to be searched. :param search_key: The key to be searched in the array. :returns: Index of search_key in array if found, else -1. Examples: >>> interpolation_search([-25, -12, -1, 10, 12, 15, 20, 41, 55], -1) 2 >>> interpolation_search([5, 10, 12, 14, 17, 20, 21], 55) -1 >>> interpolation_search([5, 10, 12, 14, 17, 20, 21], -5) -1", - "type": "function", - "file_path": "algorithms\\algorithms\\search\\interpolation_search.py", - "ast_data": "FunctionDef name:interpolation_search arguments arg:array type:List[int] arg:search_key type:int Assign Assign While BoolOp Compare op:LtE Compare op:LtE op:LtE Assign If Compare op:Eq Return return:yes If Compare op:Lt Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_weight_collections", - "source_code": "def set_weight_collections(self, weight_collections): self._weight_collections = weight_collections", - "docstring": "Sets the weight collections for the layer. 
Args: weight_collections: A list of collection names to which the Variable will be added.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column.py", - "ast_data": "FunctionDef name:set_weight_collections arguments arg:self arg:weight_collections Assign" - }, - { - "library": "salmon", - "name": "clear", - "source_code": "def clear(self): self.confirmations.clear()", - "docstring": "Used primarily in testing, this clears out all pending confirmations.", - "type": "method", - "file_path": "salmon\\salmon\\confirm.py", - "ast_data": "FunctionDef name:clear arguments arg:self" - }, - { - "library": "matplotlib", - "name": "__call__", - "source_code": "def __call__(self, value, clip = None): if clip is None: clip = self.clip result, is_scalar = self.process_value(value) if self.vmin is None or self.vmax is None: self.autoscale_None(result) (vmin,), _ = self.process_value(self.vmin) (vmax,), _ = self.process_value(self.vmax) if vmin = = vmax: result.fill(0) elif vmin > vmax: raise ValueError('minvalue must be less than or equal to maxvalue') else: if clip: mask = np.ma.getmask(result) result = np.ma.array(np.clip(result.filled(vmax), vmin, vmax), mask = mask) resdat = result.data resdat - = vmin resdat / = vmax - vmin result = np.ma.array(resdat, mask = result.mask, copy = False) if is_scalar: result = result[0] return result", - "docstring": "Normalize the data and return the normalized data. Parameters ---------- value Data to normalize. clip : bool, optional See the description of the parameter *clip* in . If ``.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:value arg:clip If Compare op:Is Assign Assign Call call:process_value If BoolOp Compare op:Is Compare op:Is Assign Call call:process_value Assign Call call:process_value If Compare op:Eq If Compare op:Gt Raise raises:ValueError('minvalue must be less than or equal to maxvalue') If Assign Call call:getmask Assign Call call:array Assign Assign Call call:array If Assign Return return:yes" - }, - { - "library": "coconut", - "name": "show_sig", - "source_code": "def show_sig(self, *messages, **kwargs): if not self.quiet: self.display(messages, main_sig, **kwargs)", - "docstring": "Prints messages with main signature if not --quiet.", - "type": "method", - "file_path": "coconut\\coconut\\terminal.py", - "ast_data": "FunctionDef name:show_sig arguments arg:self vararg:messages kwarg:kwargs If" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, transform, loc, pad = 0.4, borderpad = 0.5, prop = None, frameon = True, **kwargs): self.drawing_area = AuxTransformBox(transform) super().__init__(loc, pad = pad, borderpad = borderpad, child = self.drawing_area, prop = prop, frameon = frameon, **kwargs)", - "docstring": "An anchored container with transformed coordinates. Artists added to the *drawing_area* are scaled according to the coordinates of the transformation used. The dimensions of this artist will scale to contain the artists added. Parameters ---------- transform : The transformation object for the coordinate system in use, i.e., :attr:. loc : str Location of this artist. Valid locations are 'upper left', 'upper center', 'upper right', 'center left', 'center', 'center right', 'lower left', 'lower center', 'lower right'. For backward compatibility, numeric values are accepted as well. See the parameter *loc* of for details. 
pad : float, default: 0.4 Padding around the child objects, in fraction of the font size. borderpad : float, default: 0.5 Border padding, in fraction of the font size. prop : , optional Font property used as a reference for paddings. frameon : bool, default: True If True, draw a box around this artist. **kwargs Keyword arguments forwarded to . Attributes ---------- drawing_area : A container for artists to display. Examples -------- To display an ellipse in the upper left, with a width of 0.1 and height of 0.4 in data coordinates: >>> box = AnchoredAuxTransformBox(ax.transData, loc='upper left') >>> el = Ellipse((0, 0), width=0.1, height=0.4, angle=30) >>> box.drawing_area.add_artist(el) >>> ax.add_artist(box)", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\anchored_artists.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:transform arg:loc arg:pad arg:borderpad arg:prop arg:frameon kwarg:kwargs Assign Call call:AuxTransformBox" - }, - { - "library": "tensorflow", - "name": "devices", - "source_code": "def devices(self): return self._device_names", - "docstring": "Get the list of device names. Returns: ( of ) names of the devices.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", - "ast_data": "FunctionDef name:devices arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "can_codegen_without_upcasts", - "source_code": "def can_codegen_without_upcasts(prologue: 'SchedulerNode', disallow_fp32_ops: bool = False) -> bool: if prologue.get_operation_names() < = V.graph.low_precision_codegen_ops: return True low_prec_analysis = RecordLowPrecisionOps(disallow_fp32_ops) with config.patch('triton.codegen_upcast_to_fp32', False), V.set_ops_handler(low_prec_analysis): prologue._body(*prologue.get_ranges()) return not low_prec_analysis.low_precision_numeric_op", - "docstring": "Can this prologue be run without while preserving numerics. This is only true if the node only contains dtype conversions, indexing, and other non-arithmetic operators. If disallow_fp32_ops is True, then we also disallow ops that are explicitly computed in fp32 or fp64.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\analyze_preserves_zero_mask.py", - "ast_data": "FunctionDef name:can_codegen_without_upcasts arguments arg:prologue type:'SchedulerNode' arg:disallow_fp32_ops type:bool If Compare op:LtE Return return:yes Assign Call call:RecordLowPrecisionOps With Return return:yes" - }, - { - "library": "pytorch", - "name": "wait_all", - "source_code": "def wait_all(futures: list[Future]) -> list: return [fut.wait() for fut in torch._C._collect_all(cast(list[torch._C.Future], futures)).wait()]", - "docstring": "Waits for all provided futures to be complete, and returns the list of completed values. If any of the futures encounters an error, the method will exit early and report the error not waiting for other futures to complete. Args: futures (list): a list of :class: object. Returns: A list of the completed :class: results. 
This method will throw an error if `~torch.futures.Future` throws.", - "type": "function", - "file_path": "pytorch\\torch\\futures\\__init__.py", - "ast_data": "FunctionDef name:wait_all arguments arg:futures type:list[Future] Return return:yes" - }, - { - "library": "tensorflow", - "name": "tril", - "source_code": "@property def tril(self): return self._tril", - "docstring": "The lower triangular matrix defining this operator.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_lower_triangular.py", - "ast_data": "FunctionDef name:tril arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "devices", - "source_code": "def devices(self) -> list[Device]: return ['cpu']", - "docstring": "The devices supported by NumPy. For NumPy, this always returns ``. Returns ------- devices : list[Device] The devices supported by NumPy. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes Examples -------- >>> info = np.__array_namespace_info__() >>> info.devices() ['cpu']", - "type": "method", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\numpy\\_info.py", - "ast_data": "FunctionDef name:devices arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "get_distribution_names", - "source_code": "def get_distribution_names(namespace_pairs, rv_base_class): distn_names = [] distn_gen_names = [] for name, value in namespace_pairs: if name.startswith('_'): continue if name.endswith('_gen') and issubclass(value, rv_base_class): distn_gen_names.append(name) if isinstance(value, rv_base_class): distn_names.append(name) return (distn_names, distn_gen_names)", - "docstring": "Collect names of statistical distributions and their generators. Parameters ---------- namespace_pairs : sequence A snapshot of (name, value) pairs in the namespace of a module. rv_base_class : class The base class of random variable generator classes in a module. Returns ------- distn_names : list of strings Names of the statistical distributions. distn_gen_names : list of strings Names of the generators of the statistical distributions. Note that these are not simply the names of the statistical distributions, with a _gen suffix added.", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", - "ast_data": "FunctionDef name:get_distribution_names arguments arg:namespace_pairs arg:rv_base_class Assign Assign For If Call call:startswith If BoolOp Call call:endswith Call call:issubclass If Call call:isinstance Return return:yes" - }, - { - "library": "cherrypy", - "name": "get_ha1_dict_plain", - "source_code": "def get_ha1_dict_plain(user_password_dict): def get_ha1(realm, username): password = user_password_dict.get(username) if password: return md5_hex('%s: %s: %s' % (username, realm, password)) return None return get_ha1", - "docstring": "Return a get_ha1 function which obtains a plaintext password. user_password_dict is a dictionary of the form: {username : password}. 
If you want a simple dictionary-based authentication scheme, with plaintext passwords, use get_ha1_dict_plain(my_userpass_dict) as the value for the get_ha1 argument to digest_auth().", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\auth_digest.py", - "ast_data": "FunctionDef name:get_ha1_dict_plain arguments arg:user_password_dict FunctionDef name:get_ha1 arguments arg:realm arg:username Assign Call call:get If Return return:yes Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "cleanup", - "source_code": "def cleanup(self): if self.is_templatized: if os.path.exists(self.work_path): os.unlink(self.work_path)", - "docstring": "Remove a preprocessed copy of a translatable file (if any).", - "type": "method", - "file_path": "django\\django\\core\\management\\commands\\makemessages.py", - "ast_data": "FunctionDef name:cleanup arguments arg:self If If Call call:exists" - }, - { - "library": "tensorflow", - "name": "get_timestamped_export_dir", - "source_code": "def get_timestamped_export_dir(export_dir_base): attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: timestamp = int(time.time()) result_dir = file_io.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp))) if not gfile.Exists(result_dir): return result_dir time.sleep(1) attempts + = 1 logging.warn('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS)) raise RuntimeError(f'Failed to obtain a unique export directory name after {MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')", - "docstring": "Builds a path to a new subdirectory within the base directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). 
Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\model_utils\\export_utils.py", - "ast_data": "FunctionDef name:get_timestamped_export_dir arguments arg:export_dir_base Assign While Compare op:Lt Assign Call call:int Assign Call call:join If Return return:yes Raise raises:RuntimeError(f'Failed to obtain a unique export directory name after {MAX_DIRECTORY_CREATION_ATTEMPTS} attempts.')" - }, - { - "library": "tensorflow", - "name": "get_symbolic_inputs", - "source_code": "def get_symbolic_inputs(self, return_single_as_list = False): for i, (k, v) in enumerate(zip(self._input_names, self._flattened_inputs)): if isinstance(v, (list, float, int)): v = numpy_compat.np_asarray(v) if v.ndim = = 1: v = np.expand_dims(v, 1) if isinstance(v, np.ndarray): shape = (None,) + tuple(v.shape[1:]) if shape = = (None,): shape = (None, 1) dtype = dtypes.as_dtype(v.dtype) if dtype.is_floating: dtype = backend.floatx() v = backend.placeholder(shape = shape, name = k, dtype = dtype) elif isinstance(v, tensor_spec.TensorSpec): shape = (None,) + tuple(v.shape.as_list()[1:]) if shape = = (None,): shape = (None, 1) v = backend.placeholder(shape = shape, name = k, dtype = v.dtype) self._flattened_inputs[i] = v if self._is_dict: return dict(zip(self._input_names, self._flattened_inputs)) if self._is_single_input and (not return_single_as_list): return self._flattened_inputs[0] return self._flattened_inputs", - "docstring": "Returns inputs to be set as self.inputs for a model.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", - "ast_data": "FunctionDef name:get_symbolic_inputs arguments arg:self arg:return_single_as_list For Call call:enumerate If Call call:isinstance Assign Call call:np_asarray If Compare op:Eq Assign Call call:expand_dims If Call call:isinstance Assign If Compare op:Eq Assign Assign Call call:as_dtype If Assign Call call:floatx Assign Call call:placeholder If Call call:isinstance Assign If Compare op:Eq Assign Assign Call call:placeholder Assign If Return return:yes If BoolOp Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "storage_meta", - "source_code": "def storage_meta(self) -> Optional[StorageMeta]: return None", - "docstring": "Return the storage-specific metadata. This is used to store additional information in a checkpoint that can be useful for providing request-level observability. StorageMeta is passed to the `` during save calls. Returns None by default. TODO: provide an example", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py", - "ast_data": "FunctionDef name:storage_meta arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "__exit__", - "source_code": "def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> Any: if exc_type is None: self.close() else: object.__setattr__(self, '_closed', True) return False", - "docstring": "Support for the context manager protocol. 
Close the file if no exceptions occur and allow exceptions to propagate.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:__exit__ arguments arg:self arg:exc_type type:Any arg:exc_val type:Any arg:exc_tb type:Any If Compare op:Is Return return:yes" - }, - { - "library": "pytorch", - "name": "atleast_1d", - "source_code": "def atleast_1d(*tensors): if has_torch_function(tensors): return handle_torch_function(atleast_1d, tensors, *tensors) if len(tensors) = = 1: tensors = tensors[0] return _VF.atleast_1d(tensors)", - "docstring": "Returns a 1-dimensional view of each input tensor with zero dimensions. Input tensors with one or more dimensions are returned as-is. Args: input (Tensor or list of Tensors) Returns: output (Tensor or tuple of Tensors) Example:: >>> x = torch.arange(2) >>> x tensor([0, 1]) >>> torch.atleast_1d(x) tensor([0, 1]) >>> x = torch.tensor(1.) >>> x tensor(1.) >>> torch.atleast_1d(x) tensor([1.]) >>> x = torch.tensor(0.5) >>> y = torch.tensor(1.) >>> torch.atleast_1d((x, y)) (tensor([0.5000]), tensor([1.]))", - "type": "function", - "file_path": "pytorch\\torch\\functional.py", - "ast_data": "FunctionDef name:atleast_1d arguments vararg:tensors If Call call:has_torch_function Return return:yes If Compare op:Eq Assign Return return:yes" - }, - { - "library": "feincms", - "name": "is_sibling_of", - "source_code": "@register.filter def is_sibling_of(page1, page2): try: return _is_sibling_of(page1, page2) except AttributeError: return False", - "docstring": "Determines whether a given page is a sibling of another page :: {% if page|is_sibling_of:feincms_page %} ... {% endif %}", - "type": "function", - "file_path": "feincms\\feincms\\templatetags\\feincms_page_tags.py", - "ast_data": "FunctionDef name:is_sibling_of arguments arg:page1 arg:page2 Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "django", - "name": "get_fields", - "source_code": "def get_fields(self, field_name): if field_name not in self.fields: raise GDALException('invalid field name: %s' % field_name) return [feat.get(field_name) for feat in self]", - "docstring": "Return a list containing the given field name for every Feature in the Layer.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py", - "ast_data": "FunctionDef name:get_fields arguments arg:self arg:field_name If Compare op:NotIn Raise raises:GDALException('invalid field name: %s' % field_name) Return return:yes" - }, - { - "library": "mongo", - "name": "from_uuid", - "source_code": "@classmethod def from_uuid(cls: Type[Binary], uuid: UUID, uuid_representation: int = UuidRepresentation.STANDARD) -> Binary: if not isinstance(uuid, UUID): raise TypeError(f'uuid must be an instance of uuid.UUID, not {type(uuid)}') if uuid_representation not in ALL_UUID_REPRESENTATIONS: raise ValueError('uuid_representation must be a value from bson.binary.UuidRepresentation') if uuid_representation = = UuidRepresentation.UNSPECIFIED: raise ValueError('cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted to bson.Binary instances using bson.Binary.from_uuid() or a different UuidRepresentation can be configured. 
See the documentation for UuidRepresentation for more information.') subtype = OLD_UUID_SUBTYPE if uuid_representation = = UuidRepresentation.PYTHON_LEGACY: payload = uuid.bytes elif uuid_representation = = UuidRepresentation.JAVA_LEGACY: from_uuid = uuid.bytes payload = from_uuid[0: 8][: : -1] + from_uuid[8: 16][: : -1] elif uuid_representation = = UuidRepresentation.CSHARP_LEGACY: payload = uuid.bytes_le else: subtype = UUID_SUBTYPE payload = uuid.bytes return cls(payload, subtype)", - "docstring": "Create a BSON Binary object from a Python UUID. Creates a :class: object from a :class: instance. Assumes that the native :class: instance uses the byte-order implied by the provided `TypeErroruuid~uuid.UUIDuuid.UUID~bson.binary.UuidRepresentation~bson.binary.UuidRepresentation.STANDARDhandling-uuid-data-example` for details. .. versionadded:: 3.11", - "type": "method", - "file_path": "mongo\\bson\\binary.py", - "ast_data": "FunctionDef name:from_uuid arguments arg:cls type:Type[Binary] arg:uuid type:UUID arg:uuid_representation type:int If Raise raises:TypeError(f'uuid must be an instance of uuid.UUID, not {type(uuid)}') If Compare op:NotIn Raise raises:ValueError('uuid_representation must be a value from bson.binary.UuidRepresentation') If Compare op:Eq Raise raises:ValueError('cannot encode native uuid.UUID with UuidRepresentation.UNSPECIFIED. UUIDs can be manually converted to bson.Binary instances using bson.Binary.from_uuid() or a different UuidRepresentation can be configured. See the documentation for UuidRepresentation for more information.') Assign If Compare op:Eq Assign If Compare op:Eq Assign Assign If Compare op:Eq Assign Assign Assign Return return:yes" - }, - { - "library": "mongo", - "name": "get_key_by_alt_name", - "source_code": "async def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: self._check_closed() assert self._key_vault_coll is not None return await self._key_vault_coll.find_one({'keyAltNames': key_alt_name})", - "docstring": "Get a key document in the key vault collection that has the given ``. :param key_alt_name: (str): The key alternate name of the key to get. :return: The key document. .. versionadded:: 4.2", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\encryption.py", - "ast_data": "AsyncFunctionDef name:get_key_by_alt_name arguments arg:self arg:key_alt_name type:str Return return:yes" - }, - { - "library": "django", - "name": "configured", - "source_code": "@property def configured(self): return self._wrapped is not empty", - "docstring": "Return True if the settings have already been configured.", - "type": "method", - "file_path": "django\\django\\conf\\__init__.py", - "ast_data": "FunctionDef name:configured arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "list", - "source_code": "@classmethod def list(cls) -> list[str]: return [c.name for c in cls]", - "docstring": "Return a list of names of enumeration members. 
Returns: A list containing the names of enumeration members.", - "type": "method", - "file_path": "kornia\\kornia\\color\\colormap.py", - "ast_data": "FunctionDef name:list arguments arg:cls Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict", - "source_code": "def predict(self, X): check_is_fitted(self) X = self._check_test_data(X) x_squared_norms = row_norms(X, squared = True) sample_weight = np.ones_like(x_squared_norms) labels = self._predict_recursive(X, sample_weight, self._bisecting_tree) return labels", - "docstring": "Predict which cluster each sample in X belongs to. Prediction is made by going down the hierarchical tree in searching of closest leaf cluster. In the vector quantization literature, is called the code book and each value returned by is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) New data to predict. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\cluster\\_bisect_k_means.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X Assign Call call:_check_test_data Assign Call call:row_norms Assign Call call:ones_like Assign Call call:_predict_recursive Return return:yes" - }, - { - "library": "cherrypy", - "name": "attributes", - "source_code": "def attributes(full_attribute_name): last_dot = full_attribute_name.rfind('.') attr_name = full_attribute_name[last_dot + 1:] mod_path = full_attribute_name[: last_dot] mod = modules(mod_path) try: attr = getattr(mod, attr_name) except AttributeError: raise AttributeError(\"'%s' object has no attribute '%s'\" % (mod_path, attr_name)) return attr", - "docstring": "Load a module and retrieve an attribute of that module.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py", - "ast_data": "FunctionDef name:attributes arguments arg:full_attribute_name Assign Call call:rfind Assign Assign Assign Call call:modules Try Assign Call call:getattr ExceptHandler Raise raises:AttributeError(\"'%s' object has no attribute '%s'\" % (mod_path, attr_name)) Return return:yes" - }, - { - "library": "kornia", - "name": "GaussianBlur2d", - "source_code": "class GaussianBlur2d(Module): def __init__(self, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str = 'reflect', separable: bool = True) -> None: super().__init__() self.kernel_size = kernel_size self.sigma = sigma self.border_type = border_type self.separable = separable def __repr__(self) -> str: return f'{self.__class__.__name__}(kernel_size = {self.kernel_size}, sigma = {self.sigma}, border_type = {self.border_type}, separable = {self.separable})' def forward(self, input: Tensor) -> Tensor: return gaussian_blur2d(input, self.kernel_size, self.sigma, self.border_type, self.separable)", - "docstring": "Create an operator that blurs a tensor using a Gaussian filter. The operator smooths the given tensor with a gaussian kernel by convolving it to each channel. It supports batched operation. Arguments: kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. 
The expected modes are: `(B, C, H, W)(B, C, H, W)` Examples:: >>> input = torch.rand(2, 4, 5, 5) >>> gauss = GaussianBlur2d((3, 3), (1.5, 1.5)) >>> output = gauss(input) # 2x4x5x5 >>> output.shape torch.Size([2, 4, 5, 5])", - "type": "class", - "file_path": "kornia\\kornia\\filters\\gaussian.py", - "ast_data": "ClassDef name:GaussianBlur2d FunctionDef name:__init__ arguments arg:self arg:kernel_size type:tuple[int, int] | int arg:sigma type:tuple[float, float] | Tensor arg:border_type type:str arg:separable type:bool Assign Assign Assign Assign FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, source_shape, target_shape, layer_broadcasters, dtype = None): if not isinstance(source_shape, DynamicRaggedShape): raise TypeError('source_shape is not a DynamicRaggedShape') if not isinstance(target_shape, DynamicRaggedShape): raise TypeError('target_shape is not a DynamicRaggedShape') if not isinstance(layer_broadcasters, list): raise TypeError('layer_broadcasters not a list: ' + str(layer_broadcasters)) for bc in layer_broadcasters: if not isinstance(bc, _LayerBroadcaster): raise TypeError('Not a LayerBroadcaster: ' + str(bc)) dtype = _find_dtype(source_shape, dtype) dtype = _find_dtype(target_shape, dtype) dtype = _find_dtype_iterable(layer_broadcasters, dtype) dtype = _find_dtype(dtypes.int64, dtype) self._source_shape = source_shape.with_dtype(dtype) self._target_shape = target_shape.with_dtype(dtype) self._layer_broadcasters = [x.with_dtype(dtype) for x in layer_broadcasters]", - "docstring": "Create a broadcaster. Do not call directly. The source_shape, target_shape, and layer_broadcasters are converted to have the same dtype. Note: source_shape.rank and target_shape.rank must be known. Args: source_shape: the source DynamicRaggedShape target_shape: the target DynamicRaggedShape layer_broadcasters: List[_LayerBroadcaster] of length source_shape.rank. dtype: the preferred dtype of the broadcaster. 
Raises: TypeError: if the input types don't match.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:source_shape arg:target_shape arg:layer_broadcasters arg:dtype If Raise raises:TypeError('source_shape is not a DynamicRaggedShape') If Raise raises:TypeError('target_shape is not a DynamicRaggedShape') If Raise raises:TypeError('layer_broadcasters not a list: ' + str(layer_broadcasters)) For If Raise raises:TypeError('Not a LayerBroadcaster: ' + str(bc)) Assign Call call:_find_dtype Assign Call call:_find_dtype Assign Call call:_find_dtype_iterable Assign Call call:_find_dtype Assign Call call:with_dtype Assign Call call:with_dtype Assign" - }, - { - "library": "django", - "name": "get_ordering", - "source_code": "def get_ordering(self, request, queryset): params = self.params ordering = list(self.model_admin.get_ordering(request) or self._get_default_ordering()) if params.get(ORDER_VAR): ordering = [] order_params = params[ORDER_VAR].split('.') for p in order_params: try: none, pfx, idx = p.rpartition('-') field_name = self.list_display[int(idx)] order_field = self.get_ordering_field(field_name) if not order_field: continue if isinstance(order_field, OrderBy): if pfx = = '-': order_field = order_field.copy() order_field.reverse_ordering() ordering.append(order_field) elif hasattr(order_field, 'resolve_expression'): ordering.append(order_field.desc() if pfx = = '-' else order_field.asc()) elif pfx = = '-' and order_field.startswith(pfx): ordering.append(order_field.removeprefix(pfx)) else: ordering.append(pfx + order_field) except (IndexError, ValueError): continue ordering.extend(queryset.query.order_by) return self._get_deterministic_ordering(ordering)", - "docstring": "Return the list of ordering fields for the change list. First check the get_ordering() method in model admin, then check the object's default ordering. Then, any manually-specified ordering from the query string overrides anything. Finally, a deterministic order is guaranteed by calling _get_deterministic_ordering() with the constructed ordering.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\views\\main.py", - "ast_data": "FunctionDef name:get_ordering arguments arg:self arg:request arg:queryset Assign Assign Call call:list If Call call:get Assign Assign Call call:split For Try Assign Call call:rpartition Assign Assign Call call:get_ordering_field If If Call call:isinstance If Compare op:Eq Assign Call call:copy If Call call:hasattr If BoolOp Compare op:Eq Call call:startswith ExceptHandler Return return:yes" - }, - { - "library": "matplotlib", - "name": "transform_path_non_affine", - "source_code": "def transform_path_non_affine(self, path): x = self.transform_non_affine(path.vertices) return Path._fast_from_codes_and_verts(x, path.codes, path)", - "docstring": "Apply the non-affine part of this transform to *path*, returning a new . 
``.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:transform_path_non_affine arguments arg:self arg:path Assign Call call:transform_non_affine Return return:yes" - }, - { - "library": "pytorch", - "name": "create_script_dict", - "source_code": "def create_script_dict(obj): return torch._C.ScriptDict(obj)", - "docstring": "Create a `` and can be passed between Python and TorchScript with reference semantics and zero copy overhead.", - "type": "function", - "file_path": "pytorch\\torch\\jit\\_script.py", - "ast_data": "FunctionDef name:create_script_dict arguments arg:obj Return return:yes" - }, - { - "library": "tensorflow", - "name": "AddBackpropLoopCounter", - "source_code": "def AddBackpropLoopCounter(self, count, outer_grad_state): in_separate_functions = count.graph is not ops.get_default_graph() if in_separate_functions: count = array_ops.identity(count) else: one = constant_op.constant(1, name = 'b_count') self.Enter() self.AddName(count.name) enter_count = _Enter(count, self._name, is_constant = False, parallel_iterations = self._parallel_iterations, name = 'b_count') self.loop_enters.append(enter_count) merge_count = merge([enter_count, enter_count])[0] self._pivot_for_pred = merge_count if in_separate_functions: one = constant_op.constant(1, name = 'b_count') pred = math_ops.greater_equal(merge_count, one) self._pivot = loop_cond(pred, name = 'b_count') switch_count = switch(merge_count, self._pivot) index = math_ops.subtract(switch_count[1], one) self._pivot_for_body = index next_count = _NextIteration(index) merge_count.op._update_input(1, next_count) final_zero = exit(switch_count[0], name = 'b_count') self.loop_exits.append(final_zero) if outer_grad_state is not None: outer_grad_state.grad_sync._add_control_input(final_zero.op) self.ExitResult([final_zero]) self.Exit() return next_count", - "docstring": "Add the backprop loop that controls the iterations. This is added to the backprop loop. It is used to control the loop termination of the backprop loop. Called in the outer context of this grad context. The pseudocode is: Note that a control dependency is added to to ensure the correct execution order of stack pop ops. Args: count: The number of iterations for backprop. outer_grad_state: The outer grad state. None if not nested. 
Returns: The loop index.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", - "ast_data": "FunctionDef name:AddBackpropLoopCounter arguments arg:self arg:count arg:outer_grad_state Assign Compare op:IsNot If Assign Call call:identity Assign Call call:constant Assign Call call:_Enter Assign Assign If Assign Call call:constant Assign Call call:greater_equal Assign Call call:loop_cond Assign Call call:switch Assign Call call:subtract Assign Assign Call call:_NextIteration Assign Call call:exit If Compare op:IsNot Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_status", - "source_code": "def get_status(self): return [not colors.same_color(color, colors.to_rgba('none')) for color in self._checks.get_facecolors()]", - "docstring": "Return a list of the status (True/False) of all of the check buttons.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", - "ast_data": "FunctionDef name:get_status arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "verify", - "source_code": "def verify(self, **kwargs): torch.testing.assert_close(self.extern.output_tensor, self.expected, **kwargs)", - "docstring": "Verify the correctness of the benchmarking results", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", - "ast_data": "FunctionDef name:verify arguments arg:self kwarg:kwargs" - }, - { - "library": "pytorch", - "name": "info_dict", - "source_code": "def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]: return {'backend': 'extern', 'kernel_call_name': self.choice.call_name()}", - "docstring": "Information returned here is logged to the autotune log file when that is enabled.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", - "ast_data": "FunctionDef name:info_dict arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "decorator", - "source_code": "def decorator(dispatch_target): if not callable(dispatch_target): raise TypeError(f'Expected dispatch_target to be callable; got {dispatch_target!r}') dispatch_target = _add_name_scope_wrapper(dispatch_target, api_signature) _check_signature(api_signature, dispatch_target) for signature_checker in signature_checkers: dispatcher.Register(signature_checker, dispatch_target) _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].extend(signatures) if not signature_checkers: signature = _signature_from_annotations(dispatch_target) checker = _make_signature_checker(api_signature, signature) dispatcher.Register(checker, dispatch_target) _TYPE_BASED_DISPATCH_SIGNATURES[api][dispatch_target].append(signature) return dispatch_target", - "docstring": "Decorator that registers the given dispatch target.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", - "ast_data": "FunctionDef name:decorator arguments arg:dispatch_target If Raise raises:TypeError(f'Expected dispatch_target to be callable; got {dispatch_target!r}') Assign Call call:_add_name_scope_wrapper For If Assign Call call:_signature_from_annotations Assign Call call:_make_signature_checker Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_fig_manager", - "source_code": "@classmethod def get_fig_manager(cls, num): manager = cls.figs.get(num, None) if manager is not None: cls.set_active(manager) return manager", - "docstring": "If manager number *num* exists, make it the active one and return it; 
otherwise return *None*.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\_pylab_helpers.py", - "ast_data": "FunctionDef name:get_fig_manager arguments arg:cls arg:num Assign Call call:get If Compare op:IsNot Return return:yes" - }, - { - "library": "tensorflow", - "name": "reduce_all", - "source_code": "def reduce_all(input_tensor, axis = None, keepdims = False): v = get_static_value(input_tensor) if v is None: return math_ops.reduce_all(input_tensor, axis = axis, keepdims = keepdims) else: return v.all(axis = axis, keepdims = keepdims)", - "docstring": "A version of tf.reduce_all that eagerly evaluates if possible.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", - "ast_data": "FunctionDef name:reduce_all arguments arg:input_tensor arg:axis arg:keepdims Assign Call call:get_static_value If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_dataset", - "source_code": "@abc.abstractmethod def get_dataset(self): raise NotImplementedError", - "docstring": "Get a dataset instance for the current DataAdapter. Note that the dataset returned does not repeat for epoch, so caller might need to create new iterator for the same dataset at the beginning of the epoch. This behavior might change in future. Returns: An tf.dataset.Dataset. Caller might use the dataset in different context, eg iter(dataset) in eager to get the value directly, or in graph mode, provide the iterator tensor to Keras model function.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", - "ast_data": "FunctionDef name:get_dataset arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "AddLoss", - "source_code": "class AddLoss(Layer): def __init__(self, unconditional, **kwargs): kwargs['autocast'] = False super(AddLoss, self).__init__(**kwargs) self.unconditional = unconditional def call(self, inputs): self.add_loss(inputs, inputs = not self.unconditional) return inputs def get_config(self): config = super(AddLoss, self).get_config() config.update({'unconditional': self.unconditional}) return config", - "docstring": "Adds its inputs as a loss. Attributes: unconditional: Whether or not the loss should be conditioned on the inputs.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "ClassDef name:AddLoss FunctionDef name:__init__ arguments arg:self arg:unconditional kwarg:kwargs Assign Assign FunctionDef name:call arguments arg:self arg:inputs Return return:yes FunctionDef name:get_config arguments arg:self Assign Call call:get_config Return return:yes" - }, - { - "library": "scipy", - "name": "find_repeats", - "source_code": "def find_repeats(arr): compr = np.asarray(ma.compressed(arr), dtype = np.float64) try: need_copy = np.may_share_memory(compr, arr) except AttributeError: need_copy = False if need_copy: compr = compr.copy() return _find_repeats(compr)", - "docstring": "Find repeats in arr and return a tuple (repeats, repeat_count). The input is cast to float64. Masked values are discarded. Parameters ---------- arr : sequence Input array. The array is flattened if it is not 1D. Returns ------- repeats : ndarray Array of repeated values. counts : ndarray Array of counts. Examples -------- >>> from scipy.stats import mstats >>> mstats.find_repeats([2, 1, 2, 3, 2, 2, 5]) (array([2.]), array([4])) In the above example, 2 repeats 4 times. 
>>> mstats.find_repeats([[10, 20, 1, 2], [5, 5, 4, 4]]) (array([4., 5.]), array([2, 2])) In the above example, both 4 and 5 repeat 2 times.", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", - "ast_data": "FunctionDef name:find_repeats arguments arg:arr Assign Call call:asarray Try Assign Call call:may_share_memory ExceptHandler Assign If Assign Call call:copy Return return:yes" - }, - { - "library": "django", - "name": "feature_kwargs", - "source_code": "def feature_kwargs(self, feat): kwargs = {} for field_name, ogr_name in self.mapping.items(): model_field = self.fields[field_name] if isinstance(model_field, GeometryField): try: val = self.verify_geom(feat.geom, model_field) except GDALException: raise LayerMapError('Could not retrieve geometry from feature.') elif isinstance(model_field, models.base.ModelBase): val = self.verify_fk(feat, model_field, ogr_name) else: val = self.verify_ogr_field(feat[ogr_name], model_field) kwargs[field_name] = val return kwargs", - "docstring": "Given an OGR Feature, return a dictionary of keyword arguments for constructing the mapped model.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py", - "ast_data": "FunctionDef name:feature_kwargs arguments arg:self arg:feat Assign For Call call:items Assign If Call call:isinstance Try Assign Call call:verify_geom ExceptHandler Raise raises:LayerMapError('Could not retrieve geometry from feature.') If Call call:isinstance Assign Call call:verify_fk Assign Call call:verify_ogr_field Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "trackable_has_serialize_to_tensor", - "source_code": "def trackable_has_serialize_to_tensor(obj): if obj is base_delegate.DelegatingTrackableMixin: return trackable_has_serialize_to_tensor(obj._trackable) try: if '_serialize_to_tensors' in obj.__dict__: return True except (AttributeError, TypeError): pass for t in type(obj).mro(): if t is base_delegate.DelegatingTrackableMixin: return trackable_has_serialize_to_tensor(obj._trackable) if t is trackable.Trackable: return False elif '_serialize_to_tensors' in t.__dict__: return True elif '_gather_saveables_for_checkpoint' in t.__dict__: return False return False", - "docstring": "Returns whether obj's class has defined.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", - "ast_data": "FunctionDef name:trackable_has_serialize_to_tensor arguments arg:obj If Compare op:Is Return return:yes Try If Compare op:In Return return:yes ExceptHandler For Call call:mro If Compare op:Is Return return:yes If Compare op:Is Return return:yes If Compare op:In Return return:yes If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, onnxfunction: onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction): self.onnxfunction = onnxfunction self.param_schema = self.onnxfunction.param_schemas() op_schema = self.onnxfunction.op_schema assert op_schema is not None self.op_schema = op_schema self.type_constraints = {constraint.type_param_str: set(constraint.allowed_type_strs) for constraint in self.op_schema.type_constraints} self.attributes = self.op_schema.attributes self._matching_score: int | None = None", - "docstring": "Initialize the OnnxSchemaChecker . 
Args: onnxfunction: The OnnxFunction.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\onnxfunction_dispatcher.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:onnxfunction type:onnxscript.OnnxFunction | onnxscript.TracedOnnxFunction Assign Assign Call call:param_schemas Assign Assign Assign Assign" - }, - { - "library": "pygame", - "name": "change_layer", - "source_code": "def change_layer(self, sprite, new_layer): LayeredUpdates.change_layer(self, sprite, new_layer) if sprite.dirty = = 0: sprite.dirty = 1", - "docstring": "change the layer of the sprite LayeredUpdates.change_layer(sprite, new_layer): return None The sprite must have been added to the renderer already. This is not checked.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:change_layer arguments arg:self arg:sprite arg:new_layer If Compare op:Eq Assign" - }, - { - "library": "scipy", - "name": "pointbiserialr", - "source_code": "def pointbiserialr(x, y): x = ma.fix_invalid(x, copy = True).astype(bool) y = ma.fix_invalid(y, copy = True).astype(float) m = ma.mask_or(ma.getmask(x), ma.getmask(y)) if m is not nomask: unmask = np.logical_not(m) x = x[unmask] y = y[unmask] n = len(x) phat = x.sum() / float(n) y0 = y[~x] y1 = y[x] y0m = y0.mean() y1m = y1.mean() rpb = (y1m - y0m) * np.sqrt(phat * (1 - phat)) / y.std() df = n - 2 t = rpb * ma.sqrt(df / (1.0 - rpb ** 2)) prob = _betai(0.5 * df, 0.5, df / (df + t * t)) return PointbiserialrResult(rpb, prob)", - "docstring": "Calculates a point biserial correlation coefficient and its p-value. Parameters ---------- x : array_like of bools Input array. y : array_like Input array. Returns ------- correlation : float R value pvalue : float 2-tailed p-value Notes ----- Missing values are considered pair-wise: if a value is missing in x, the corresponding value in y is masked. For more details on , see .", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", - "ast_data": "FunctionDef name:pointbiserialr arguments arg:x arg:y Assign Call call:astype Assign Call call:astype Assign Call call:mask_or If Compare op:IsNot Assign Call call:logical_not Assign Assign Assign Call call:len Assign Assign Assign Assign Call call:mean Assign Call call:mean Assign Assign Assign Assign Call call:_betai Return return:yes" - }, - { - "library": "cherrypy", - "name": "validate_translator", - "source_code": "def validate_translator(t): if not isinstance(t, dict): raise ValueError('The translate argument must be a dict.')", - "docstring": "Ensure the translator is of the correct length and size.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\_cpdispatch.py", - "ast_data": "FunctionDef name:validate_translator arguments arg:t If Raise raises:ValueError('The translate argument must be a dict.')" - }, - { - "library": "authlib", - "name": "delete_temporary_credential", - "source_code": "def delete_temporary_credential(self, request): raise NotImplementedError()", - "docstring": "Delete temporary credential from database or cache. 
For instance, if temporary credential is saved in cache:: def delete_temporary_credential(self, request): key = \"a-key-prefix:{}\".format(request.token) cache.delete(key) :param request: OAuth1Request instance", - "type": "method", - "file_path": "authlib\\authlib\\oauth1\\rfc5849\\authorization_server.py", - "ast_data": "FunctionDef name:delete_temporary_credential arguments arg:self arg:request Raise raises:NotImplementedError()" - }, - { - "library": "tensorflow", - "name": "scatter_sub", - "source_code": "def scatter_sub(self, sparse_delta, use_locking = False, name = None): if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta) return gen_state_ops.scatter_sub(self._variable, sparse_delta.indices, sparse_delta.values, use_locking = use_locking, name = name)", - "docstring": "Subtracts from this variable. Args: sparse_delta: to be subtracted from this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered subtraction has completed. Raises: TypeError: if is not an .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", - "ast_data": "FunctionDef name:scatter_sub arguments arg:self arg:sparse_delta arg:use_locking arg:name If Raise raises:TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta) Return return:yes" - }, - { - "library": "mongo", - "name": "max", - "source_code": "def max(self, spec: _Sort) -> Cursor[_DocumentType]: if not isinstance(spec, (list, tuple)): raise TypeError(f'spec must be an instance of list or tuple, not {type(spec)}') self._check_okay_to_chain() self._max = dict(spec) return self", - "docstring": "Adds `~hint~hint~hint`. .. versionadded:: 2.7", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\cursor.py", - "ast_data": "FunctionDef name:max arguments arg:self arg:spec type:_Sort If Raise raises:TypeError(f'spec must be an instance of list or tuple, not {type(spec)}') Assign Call call:dict Return return:yes" - }, - { - "library": "tensorflow", - "name": "NativeObject", - "source_code": "class NativeObject(object): pass", - "docstring": "Types natively supported by various TF operations. The most notable example of NativeObject is Tensor.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\types\\internal.py", - "ast_data": "ClassDef name:NativeObject" - }, - { - "library": "kornia", - "name": "cx_right", - "source_code": "@property def cx_right(self) -> Tensor: return self.rectified_right_camera[..., 0, 2]", - "docstring": "Return the x-coordinate of the principal point for the right camera. Returns: tensor of shape :math:", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", - "ast_data": "FunctionDef name:cx_right arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "next_layer", - "source_code": "def next_layer(self, original_rp, broadcast_rp): gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp) return _LayerBroadcaster.from_gather_index(gather_index)", - "docstring": "Create the next layer gather_index whether or not a broadcast happens. *---------self------->* | | original_rp broadcast_rp | | \\|/ \\|/ *--next_broadcaster-->* Args: original_rp: the original row partition. broadcast_rp: the target row partition. 
Returns: the gather_index for next_broadcaster.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", - "ast_data": "FunctionDef name:next_layer arguments arg:self arg:original_rp arg:broadcast_rp Assign Call call:_next_layer_gather_index Return return:yes" - }, - { - "library": "numpy", - "name": "randn", - "source_code": "def randn(*args): if isinstance(args[0], tuple): args = args[0] return asmatrix(np.random.randn(*args))", - "docstring": "Return a random matrix with data from the \"standard normal\" distribution. generates a matrix filled with random floats sampled from a univariate \"normal\" (Gaussian) distribution of mean 0 and variance 1. Parameters ---------- \\*args : Arguments Shape of the output. If given as N integers, each integer specifies the size of one dimension. If given as a tuple, this tuple gives the complete shape. Returns ------- Z : matrix of floats A matrix of floating-point samples drawn from the standard normal distribution. See Also -------- rand, numpy.random.RandomState.randn Notes ----- For random samples from the normal distribution with mean ``, use:: sigma * np.matlib.randn(...) + mu Examples -------- >>> np.random.seed(123) >>> import numpy.matlib >>> np.matlib.randn(1) matrix([[-1.0856306]]) >>> np.matlib.randn(1, 2, 3) matrix([[ 0.99734545, 0.2829785 , -1.50629471], [-0.57860025, 1.65143654, -2.42667924]]) Two-by-four matrix of samples from the normal distribution with mean 3 and standard deviation 2.5: >>> 2.5 * np.matlib.randn((2, 4)) + 3 matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462], [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]])", - "type": "function", - "file_path": "numpy\\numpy\\matlib.py", - "ast_data": "FunctionDef name:randn arguments vararg:args If Call call:isinstance Assign Return return:yes" - }, - { - "library": "authlib", - "name": "get_acr", - "source_code": "def get_acr(self) -> str: return None", - "docstring": "Get the \"acr\" (Authentication Method Class) value of the authorization code object.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\core\\models.py", - "ast_data": "FunctionDef name:get_acr arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "main", - "source_code": "def main(function: Callable[[IO[str]], Iterable[tuple[int, str]]], source_path: str, output_format: str) -> bool: is_failed: bool = False for file_path in source_path: with open(file_path, encoding = 'utf-8') as file_obj: for line_number, msg in function(file_obj): is_failed = True print(output_format.format(source_path = file_path, line_number = line_number, msg = msg)) return is_failed", - "docstring": "Main entry point of the script. Parameters ---------- function : Callable Function to execute for the specified validation type. source_path : str Source path representing path to a file/directory. output_format : str Output format of the error message. file_extensions_to_check : str Comma separated values of what file extensions to check. excluded_file_paths : str Comma separated values of what file paths to exclude during the check. Returns ------- bool True if found any patterns are found related to the given function. 
Raises ------ ValueError If the is not pointing to existing file/directory.", - "type": "function", - "file_path": "pandas\\scripts\\validate_unwanted_patterns.py", - "ast_data": "FunctionDef name:main arguments arg:function type:Callable[[IO[str]], Iterable[tuple[int, str]]] arg:source_path type:str arg:output_format type:str For With For Call call:function Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "any_chain", - "source_code": "@compatibility(is_backward_compatible = False) def any_chain(*op_support: OperatorSupportBase) -> OperatorSupportBase: def _any_chain(submods, node) -> bool: return any((x.is_node_supported(submods, node) for x in op_support)) return create_op_support(_any_chain)", - "docstring": "Combines a sequence of instances to form a single instance by evaluating each input instance, and returns True if any of it reports True.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py", - "ast_data": "FunctionDef name:any_chain arguments vararg:op_support Call call:compatibility FunctionDef name:_any_chain arguments arg:submods arg:node Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "broadcast_dim", - "source_code": "def broadcast_dim(tensor_input1, tensor_input2, res1, res2, index, padding = False): if tensor_input1[index] is None: assert padding if not padding: return Conj([BinConstraintD(tensor_input1[index], 1, op_eq), BinConstraintD(res1[index], res2[index], op_eq), BinConstraintD(res2[index], tensor_input2[index], op_eq)]) else: return Conj([BinConstraintD(res1[index], res2[index], op_eq), BinConstraintD(res2[index], tensor_input2[index], op_eq)])", - "docstring": "Apply broadcasting to the 'index' dimension of tensor_input1. Args: tensor_input1: should represent [d1, ..., d_index, ...] where d_index = 1 tensor_input2: represents the second input res1: broadcasted result 1 res2: broadcasted result 2 index: the index to broadcast padding: If padding was used, then tensor_input1[index] does not exist Returns:", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:broadcast_dim arguments arg:tensor_input1 arg:tensor_input2 arg:res1 arg:res2 arg:index arg:padding If Compare op:Is If Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_canonical_name_git_grep", - "source_code": "def get_canonical_name_git_grep(filename): return re.sub('\\\\.pyx(\\\\.tp)?', '', filename)", - "docstring": "Return name based on filename. 
The goal is to return a name that can easily be matched with the output from .", - "type": "function", - "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py", - "ast_data": "FunctionDef name:get_canonical_name_git_grep arguments arg:filename Return return:yes" - }, - { - "library": "django", - "name": "references_index", - "source_code": "def references_index(self, table, index): return False", - "docstring": "Return whether or not this instance references the specified index.", - "type": "method", - "file_path": "django\\django\\db\\backends\\ddl_references.py", - "ast_data": "FunctionDef name:references_index arguments arg:self arg:table arg:index Return return:yes" - }, - { - "library": "pytorch", - "name": "output_spec", - "source_code": "@cached_property def output_spec(self) -> DTensorSpec: if isinstance(self.output_specs, DTensorSpec): return self.output_specs else: raise ValueError(f'function output_spec expects a single DTensorSpec but got: {self.output_specs}')", - "docstring": "This function requires that the strategy have exactly one DTensorSpec as the output spec. If the output_specs is a tuple, we throw an exception.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py", - "ast_data": "FunctionDef name:output_spec arguments arg:self If Call call:isinstance Return return:yes Raise raises:ValueError(f'function output_spec expects a single DTensorSpec but got: {self.output_specs}')" - }, - { - "library": "tensorflow", - "name": "make_initializable_iterator", - "source_code": "def make_initializable_iterator(self): return self._make_initializable_iterator()", - "docstring": "Get an initializable iterator for DistributedDatasetV1. Note: This API is deprecated. Please use to create an initializable iterator. Returns: A DistributedIteratorV1 instance.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", - "ast_data": "FunctionDef name:make_initializable_iterator arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "linear", - "source_code": "def linear(input: Tensor, weight: Tensor, bias: Optional[Tensor] = None, scale: Optional[float] = None, zero_point: Optional[int] = None) -> Tensor: if scale is None: scale = input.q_scale() if zero_point is None: zero_point = input.q_zero_point() _packed_params = torch.ops.quantized.linear_prepack(weight, bias) return torch.ops.quantized.linear(input, _packed_params, scale, zero_point)", - "docstring": "Applies a linear transformation to the incoming quantized data: :math:. See :class: .. note:: Current implementation packs weights on every call, which has penalty on performance. If you want to avoid the overhead, use :class:. Args: input (Tensor): Quantized input of type weight (Tensor): Quantized weight of type bias (Tensor): None or fp32 bias of type scale (double): output scale. If None, derived from the input scale zero_point (long): output zero point. 
If None, derived from the input zero_point Shape: - Input: :math: where means any number of additional dimensions - Weight: :math: - Bias: :math: - Output: :math:", - "type": "function", - "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", - "ast_data": "FunctionDef name:linear arguments arg:input type:Tensor arg:weight type:Tensor arg:bias type:Optional[Tensor] arg:scale type:Optional[float] arg:zero_point type:Optional[int] If Compare op:Is Assign Call call:q_scale If Compare op:Is Assign Call call:q_zero_point Assign Call call:linear_prepack Return return:yes" - }, - { - "library": "sphinx", - "name": "convert_html_css_files", - "source_code": "def convert_html_css_files(app: Sphinx, config: Config) -> None: html_css_files: list[tuple[str, dict[str, str]]] = [] for entry in config.html_css_files: if isinstance(entry, str): html_css_files.append((entry, {})) else: try: filename, attrs = entry html_css_files.append((filename, attrs)) except Exception: logger.warning(__('invalid css_file: %r, ignored'), entry) continue config.html_css_files = html_css_files", - "docstring": "Convert string styled html_css_files to tuple styled one.", - "type": "function", - "file_path": "sphinx\\sphinx\\builders\\html\\__init__.py", - "ast_data": "FunctionDef name:convert_html_css_files arguments arg:app type:Sphinx arg:config type:Config For If Call call:isinstance Try Assign ExceptHandler Assign" - }, - { - "library": "scipy", - "name": "tmax", - "source_code": "@xp_capabilities() @_axis_nan_policy_factory(lambda x: x, n_outputs = 1, result_to_tuple = lambda x, _: (x,)) def tmax(a, upperlimit = None, axis = 0, inclusive = True, nan_policy = 'propagate'): xp = array_namespace(a) min_ = xp.iinfo(a.dtype).min if xp.isdtype(a.dtype, 'integral') else -xp.inf a, mask = _put_val_to_limits(a, (None, upperlimit), (None, inclusive), val = min_, xp = xp) res = xp.max(a, axis = axis) invalid = xp.all(mask, axis = axis) if is_lazy_array(invalid) or xp.any(invalid): res = xp_promote(res, force_floating = True, xp = xp) res = xp.where(invalid, xp.nan, res) return res[()] if res.ndim = = 0 else res", - "docstring": "Compute the trimmed maximum. This function computes the maximum value of an array along a given axis, while ignoring values larger than a specified upper limit. Parameters ---------- a : array_like Array of values. upperlimit : None or float, optional Values in the input array greater than the given limit will be ignored. When upperlimit is None, then all values are used. The default value is None. axis : int or None, optional Axis along which to operate. Default is 0. If None, compute over the whole array . inclusive : {True, False}, optional This flag determines whether values exactly equal to the upper limit are included. The default value is True. Returns ------- tmax : float, int or ndarray Trimmed maximum. 
Examples -------- >>> import numpy as np >>> from scipy import stats >>> x = np.arange(20) >>> stats.tmax(x) 19 >>> stats.tmax(x, 13) 13 >>> stats.tmax(x, 13, inclusive=False) 12", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_stats_py.py", - "ast_data": "FunctionDef name:tmax arguments arg:a arg:upperlimit arg:axis arg:inclusive arg:nan_policy Call call:xp_capabilities Call call:_axis_nan_policy_factory Assign Call call:array_namespace Assign Assign Call call:_put_val_to_limits Assign Call call:max Assign Call call:all If BoolOp Call call:is_lazy_array Call call:any Assign Call call:xp_promote Assign Call call:where Return return:yes" - }, - { - "library": "numpy", - "name": "__init__", - "source_code": "def __init__(self, float_conv = float, int_conv = int, float_to_float = float, float_to_str = lambda v: f'{v: 24.16e}', title = 'Python floating point number'): with errstate(under = 'ignore'): self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)", - "docstring": "float_conv - convert integer to float (array) int_conv - convert float (array) to integer float_to_float - convert float array to float float_to_str - convert array float to str title - description of used floating point numbers", - "type": "method", - "file_path": "numpy\\numpy\\_core\\_machar.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:float_conv arg:int_conv arg:float_to_float arg:float_to_str arg:title With" - }, - { - "library": "pytorch", - "name": "OrderedDictWrapper", - "source_code": "class OrderedDictWrapper: def __init__(self, cpp_module, attr): self.cpp_module = cpp_module self.attr = attr @property def cpp_dict(self): return getattr(self.cpp_module, self.attr) def items(self): return self.cpp_dict.items() def keys(self): return self.cpp_dict.keys() def values(self): return self.cpp_dict.values() def __iter__(self): return self.cpp_dict.__iter__() def __len__(self): return self.cpp_dict.__len__() def __contains__(self, key): return self.cpp_dict.__contains__(key) def __getitem__(self, key): return self.cpp_dict.__getitem__(key)", - "docstring": "A wrapper around a C++ OrderedDict. It dynamically evaluates the OrderedDict getter on a bound C++ module, such that new changes on the C++ side are picked up. Otherwise accessing e.g. 
`` so using properties does not work.", - "type": "class", - "file_path": "pytorch\\torch\\nn\\cpp.py", - "ast_data": "ClassDef name:OrderedDictWrapper FunctionDef name:__init__ arguments arg:self arg:cpp_module arg:attr Assign Assign FunctionDef name:cpp_dict arguments arg:self Return return:yes FunctionDef name:items arguments arg:self Return return:yes FunctionDef name:keys arguments arg:self Return return:yes FunctionDef name:values arguments arg:self Return return:yes FunctionDef name:__iter__ arguments arg:self Return return:yes FunctionDef name:__len__ arguments arg:self Return return:yes FunctionDef name:__contains__ arguments arg:self arg:key Return return:yes FunctionDef name:__getitem__ arguments arg:self arg:key Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_next", - "source_code": "def get_next(self, device, name = None): del device, name with ops.device(self._worker): return self._fn()", - "docstring": "Get next element for the given device from the callable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", - "ast_data": "FunctionDef name:get_next arguments arg:self arg:device arg:name With Return return:yes" - }, - { - "library": "pytorch", - "name": "cross_product", - "source_code": "def cross_product(*inputs): return list(itertools.product(*inputs))", - "docstring": "Return a list of cartesian product of input iterables. For example, cross_product(A, B) returns ((x,y) for x in A for y in B).", - "type": "function", - "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py", - "ast_data": "FunctionDef name:cross_product arguments vararg:inputs Return return:yes" - }, - { - "library": "pytorch", - "name": "Shard", - "source_code": "@dataclass class Shard: __slots__ = ['tensor', 'metadata'] tensor: torch.Tensor metadata: ShardMetadata def __post_init__(self) -> None: if list(self.tensor.size()) ! = self.metadata.shard_sizes: raise ValueError(f'Shard tensor size does not match with metadata.shard_lengths! Found shard tensor size: {list(self.tensor.size())}, metadata.shard_lengths: {self.metadata.shard_sizes}, ') placement_device = self.metadata.placement if placement_device is not None and placement_device.device() ! = self.tensor.device: raise ValueError(f\"Local shard tensor device does not match with local Shard's placement! Found local shard tensor device: {self.tensor.device}, local shard metadata placement device: {placement_device.device()}\") @classmethod def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: list[int], rank: int) -> 'Shard': shard_sizes = list(tensor.size()) placement = _remote_device(f'rank: {rank}/{str(tensor.device)}') shard_meta = ShardMetadata(shard_offsets = shard_offsets, shard_sizes = shard_sizes, placement = placement) return Shard(tensor, shard_meta)", - "docstring": "Container which holds the data for a shard as a Tensor and also the associated metadata for that shard. Args: tensor(torch.Tensor): Local tensor for the shard. metadata(:class ): The metadata for the shard, including offsets, lengths and device placement.", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\shard.py", - "ast_data": "ClassDef name:Shard Assign FunctionDef name:__post_init__ arguments arg:self If Compare op:NotEq Raise raises:ValueError(f'Shard tensor size does not match with metadata.shard_lengths! 
Found shard tensor size: {list(self.tensor.size())}, metadata.shard_lengths: {self.metadata.shard_sizes}, ') Assign If BoolOp Compare op:IsNot Compare op:NotEq Raise raises:ValueError(f\"Local shard tensor device does not match with local Shard's placement! Found local shard tensor device: {self.tensor.device}, local shard metadata placement device: {placement_device.device()}\") FunctionDef name:from_tensor_and_offsets arguments arg:cls arg:tensor type:torch.Tensor arg:shard_offsets type:list[int] arg:rank type:int Assign Call call:list Assign Call call:_remote_device Assign Call call:ShardMetadata Return return:yes" - }, - { - "library": "mongo", - "name": "NotPrimaryError", - "source_code": "class NotPrimaryError(AutoReconnect): def __init__(self, message: str = '', errors: Optional[Union[Mapping[str, Any], list[Any]]] = None) -> None: super().__init__(_format_detailed_error(message, errors), errors = errors)", - "docstring": "The server responded \"not primary\" or \"node is recovering\". These errors result from a query, write, or command. The operation failed because the client thought it was using the primary but the primary has stepped down, or the client thought it was using a healthy secondary but the secondary is stale and trying to recover. The client launches a refresh operation on a background thread, to update its view of the server as soon as possible after throwing this exception. Subclass of :exc:. .. versionadded:: 3.12", - "type": "class", - "file_path": "mongo\\pymongo\\errors.py", - "ast_data": "ClassDef name:NotPrimaryError FunctionDef name:__init__ arguments arg:self arg:message type:str arg:errors type:Optional[Union[Mapping[str, Any], list[Any]]]" - }, - { - "library": "pytorch", - "name": "is_warn_always_enabled", - "source_code": "def is_warn_always_enabled() -> builtins.bool: return _C._get_warnAlways()", - "docstring": "Returns True if the global warn_always flag is turned on. Refer to :func: documentation for more details.", - "type": "function", - "file_path": "pytorch\\torch\\__init__.py", - "ast_data": "FunctionDef name:is_warn_always_enabled arguments Return return:yes" - }, - { - "library": "pandas", - "name": "kind", - "source_code": "@property def kind(self) -> str: return 'O'", - "docstring": "A character code (one of 'biufcmMOSUV'), default 'O' This should match the NumPy dtype used when the array is converted to an ndarray, which is probably 'O' for object if the extension type cannot be represented as a built-in NumPy type. See Also -------- numpy.dtype.kind", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\base.py", - "ast_data": "FunctionDef name:kind arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "eval_condition", - "source_code": "def eval_condition(self, condition: str) -> bool: if condition in self._condition_cache: return self._condition_cache[condition] parser = BooleanParser(_ENV, condition, state = 'variable') expr = parser.parse_expression() if not parser.stream.eos: msg = 'chunk after expression' raise ValueError(msg) evaluated = self._condition_cache[condition] = self._eval_node(expr) return evaluated", - "docstring": "Evaluate a boolean condition. 
Only conditional expressions and binary operators (and, or, not) are permitted, and operate on tag names, where truthy values mean the tag is present and vice versa.", - "type": "method", - "file_path": "sphinx\\sphinx\\util\\tags.py", - "ast_data": "FunctionDef name:eval_condition arguments arg:self arg:condition type:str If Compare op:In Return return:yes Assign Call call:BooleanParser Assign Call call:parse_expression If Assign Raise raises:ValueError(msg) Assign Call call:_eval_node Return return:yes" - }, - { - "library": "pytorch", - "name": "AHFeature", - "source_code": "class AHFeature: def __init__(self, name: str, value: Value, is_categorical: bool = False) -> None: self.name = name self.value = value self.is_categorical = is_categorical", - "docstring": "The context, that AutoHeuristic stores, is a list of features. AutoHeuristic needs to know whether a feature is categorical (i.e., not a continuous variable) to learn a machine learning model.", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic_utils.py", - "ast_data": "ClassDef name:AHFeature FunctionDef name:__init__ arguments arg:self arg:name type:str arg:value type:Value arg:is_categorical type:bool Assign Assign Assign" - }, - { - "library": "mongo", - "name": "cluster_time", - "source_code": "@property def cluster_time(self) -> Optional[ClusterTime]: return self._cluster_time", - "docstring": "The cluster time returned by the last operation executed in this session.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\client_session.py", - "ast_data": "FunctionDef name:cluster_time arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "render", - "source_code": "def render(self): retval = self if not self._is_rendered: self.content = self.rendered_content for post_callback in self._post_render_callbacks: newretval = post_callback(retval) if newretval is not None: retval = newretval return retval", - "docstring": "Render (thereby finalizing) the content of the response. If the content has already been rendered, this is a no-op. 
Return the baked response instance.", - "type": "method", - "file_path": "django\\django\\template\\response.py", - "ast_data": "FunctionDef name:render arguments arg:self Assign If Assign For Assign Call call:post_callback If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "__new__", - "source_code": "def __new__(cls, stylename, **kwargs): _list = stylename.replace(' ', '').split(', ') _name = _list[0].lower() try: _cls = cls._style_list[_name] except KeyError as err: raise ValueError(f'Unknown style: {stylename!r}') from err try: _args_pair = [cs.split(' = ') for cs in _list[1:]] _args = {k: float(v) for k, v in _args_pair} except ValueError as err: raise ValueError(f'Incorrect style argument: {stylename!r}') from err return _cls(**{**_args, **kwargs})", - "docstring": "Return the instance of the subclass with the given style name.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:__new__ arguments arg:cls arg:stylename kwarg:kwargs Assign Call call:split Assign Call call:lower Try Assign ExceptHandler Raise raises:ValueError(f'Unknown style: {stylename!r}') Try Assign Assign ExceptHandler Raise raises:ValueError(f'Incorrect style argument: {stylename!r}') Return return:yes" - }, - { - "library": "tensorflow", - "name": "stitch_values", - "source_code": "def stitch_values(values_and_indices_list): length = 0 for values_and_indices in values_and_indices_list: length + = len(values_and_indices[0]) result = [None] * length for values_and_indices in values_and_indices_list: if values_and_indices and values_and_indices[0]: for v, i in zip(*values_and_indices): assert result[i] is None result[i] = v return result", - "docstring": "Stitch values together according to their indices. Args: values_and_indices_list: a list of tuples of values and indices indicating the values and positions in the returned list. Returns: a stitched list of values.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_utils.py", - "ast_data": "FunctionDef name:stitch_values arguments arg:values_and_indices_list Assign For Assign For If BoolOp For Call call:zip Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "is_transform_set", - "source_code": "def is_transform_set(self): return self._transformSet", - "docstring": "Return whether the Artist has an explicitly set transform. 
This is *True* after has been called.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\artist.py", - "ast_data": "FunctionDef name:is_transform_set arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "get_js_stemmer_code", - "source_code": "def get_js_stemmer_code(self) -> str: if not self.lang.js_stemmer_rawcode: return self.lang.js_stemmer_code base_js_path = _MINIFIED_JS_PATH / 'base-stemmer.js' language_js_path = _MINIFIED_JS_PATH / self.lang.js_stemmer_rawcode return '\\n'.join((base_js_path.read_text(encoding = 'utf-8'), language_js_path.read_text(encoding = 'utf-8'), f'window.Stemmer = {self.lang.language_name}Stemmer;'))", - "docstring": "Returns JS code that will be inserted into language_data.js.", - "type": "method", - "file_path": "sphinx\\sphinx\\search\\__init__.py", - "ast_data": "FunctionDef name:get_js_stemmer_code arguments arg:self If Return return:yes Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "get_model", - "source_code": "def get_model(self, model_name, require_ready = True): if require_ready: self.apps.check_models_ready() else: self.apps.check_apps_ready() try: return self.models[model_name.lower()] except KeyError: raise LookupError(\"App '%s' doesn't have a '%s' model.\" % (self.label, model_name))", - "docstring": "Return the model with the given case-insensitive model_name. Raise LookupError if no model exists with this name.", - "type": "method", - "file_path": "django\\django\\apps\\config.py", - "ast_data": "FunctionDef name:get_model arguments arg:self arg:model_name arg:require_ready If Try Return return:yes ExceptHandler Raise raises:LookupError(\"App '%s' doesn't have a '%s' model.\" % (self.label, model_name))" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "def __call__(self, shape, dtype = None, **kwargs): _validate_kwargs(self.__class__.__name__, kwargs) dtype = _assert_float_dtype(_get_dtype(dtype)) if _PARTITION_SHAPE in kwargs: shape = kwargs[_PARTITION_SHAPE] return self._random_generator.random_normal(shape, self.mean, self.stddev, dtype)", - "docstring": "Returns a tensor object initialized to random normal values. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. 
If not specified, is used, which default to unless you configured it otherwise (via ) **kwargs: Additional keyword arguments.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\initializers\\initializers_v2.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:shape arg:dtype kwarg:kwargs Assign Call call:_assert_float_dtype If Compare op:In Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "reset_partition_in_sparse_nn", - "source_code": "def reset_partition_in_sparse_nn(partition, new_partition = True): if in_embedding_region: embedding_partitions.append(partition) else: non_embedding_partitions.append(partition) if new_partition: partition = self.create_partition() partition.left_mem_bytes = available_mem_bytes return partition return None", - "docstring": "If crossing the boundary between non-embedding nodes and embedding nodes, create a new partition", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\accelerator_partitioner.py", - "ast_data": "FunctionDef name:reset_partition_in_sparse_nn arguments arg:partition arg:new_partition If If Assign Call call:create_partition Assign Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_transform_rotates_text", - "source_code": "def get_transform_rotates_text(self): return self._transform_rotates_text", - "docstring": "Return whether rotations of the transform affect the text direction.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:get_transform_rotates_text arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_arrowstyle", - "source_code": "@_docstring.interpd def set_arrowstyle(self, arrowstyle = None, **kwargs): if arrowstyle is None: return ArrowStyle.pprint_styles() self._arrow_transmuter = ArrowStyle(arrowstyle, **kwargs) if isinstance(arrowstyle, str) else arrowstyle self.stale = True", - "docstring": "Set the arrow style, possibly with further attributes. Attributes from the previous arrow style are not reused. Without argument (or with `~matplotlib.patches.ArrowStyle.ArrowStyle.ArrowStyle` object, as documented in that class. The following arrow styles are available: %(ArrowStyle:table_and_accepts)s **kwargs Additional attributes for the arrow style. See the table above for supported parameters. Examples -------- :: set_arrowstyle(\"Fancy,head_length=0.2\") set_arrowstyle(\"fancy\", head_length=0.2)", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_arrowstyle arguments arg:self arg:arrowstyle kwarg:kwargs If Compare op:Is Return return:yes Assign Assign" - }, - { - "library": "tensorflow", - "name": "to_parted", - "source_code": "def to_parted(self) -> 'Layout': return Layout._new_object(layout = super().to_parted())", - "docstring": "Returns a \"parted\" layout from a static layout. A parted layout contains axes that are treated as independent by most of SPMD expanders. 
FIXME(b/285905569): The exact semantics is still being investigated.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", - "ast_data": "FunctionDef name:to_parted arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "filesystem_set_configuration", - "source_code": "def filesystem_set_configuration(scheme, key, value, name = None): return _gen_filesystem_ops.file_system_set_configuration(scheme, key = key, value = value, name = name)", - "docstring": "Set configuration of the file system. Args: scheme: File system scheme. key: The name of the configuration option. value: The value of the configuration option. name: A name for the operation (optional). Returns: None.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\filesystem_ops.py", - "ast_data": "FunctionDef name:filesystem_set_configuration arguments arg:scheme arg:key arg:value arg:name Return return:yes" - }, - { - "library": "matplotlib", - "name": "process_selected", - "source_code": "def process_selected(self, ind, xs, ys): pass", - "docstring": "Default \"do nothing\" implementation of the method. Parameters ---------- ind : list of int The indices of the selected vertices. xs, ys : array-like The coordinates of the selected vertices.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\lines.py", - "ast_data": "FunctionDef name:process_selected arguments arg:self arg:ind arg:xs arg:ys" - }, - { - "library": "tensorflow", - "name": "should_overwrite", - "source_code": "def should_overwrite(filepath, overwrite): if not overwrite and os.path.isfile(filepath): return ask_to_proceed_with_overwrite(filepath) return True", - "docstring": "Returns whether the filepath should be overwritten.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saving_utils.py", - "ast_data": "FunctionDef name:should_overwrite arguments arg:filepath arg:overwrite If BoolOp Call call:isfile Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "JitPlugin", - "source_code": "class JitPlugin(CoveragePlugin): def dynamic_context(self, frame: Any) -> None: if frame.f_code.co_name = = 'should_drop': obj = frame.f_locals['fn'] if is_not_builtin_class(obj) or ismodule(obj) or ismethod(obj) or isfunction(obj) or iscode(obj): filename = getsourcefile(obj) if filename: try: sourcelines, starting_lineno = getsourcelines(obj) except OSError: pass else: line_data = {filename: range(starting_lineno, starting_lineno + len(sourcelines))} cov_data.add_lines(line_data) super().dynamic_context(frame)", - "docstring": "dynamic_context is an overridden function that gives us access to every frame run during the coverage process. 
We look for when the function being run is , as all functions that get passed into will be compiled and thus should be marked as covered.", - "type": "class", - "file_path": "pytorch\\tools\\coverage_plugins_package\\src\\coverage_plugins\\jit_plugin.py", - "ast_data": "ClassDef name:JitPlugin FunctionDef name:dynamic_context arguments arg:self arg:frame type:Any If Compare op:Eq Assign If BoolOp Call call:is_not_builtin_class Call call:ismodule Call call:ismethod Call call:isfunction Call call:iscode Assign Call call:getsourcefile If Try Assign Call call:getsourcelines ExceptHandler Assign" - }, - { - "library": "scipy", - "name": "is_valid_y", - "source_code": "def is_valid_y(y, warning = False, throw = False, name = None): y = _asarray(y) name_str = f\"'{name}' \" if name else '' try: if len(y.shape) ! = 1: raise ValueError(f'Condensed distance matrix {name_str}must have shape = 1 (i.e. be one-dimensional).') n = y.shape[0] d = int(np.ceil(np.sqrt(n * 2))) if d * (d - 1) / 2 ! = n: raise ValueError(f'Length n of condensed distance matrix {name_str}must be a binomial coefficient, i.e. there must be a k such that (k \\\\choose 2) = n)!') except Exception as e: if throw: raise if warning: warnings.warn(str(e), stacklevel = 2) return False return True", - "docstring": "Return True if the input array is a valid condensed distance matrix. Condensed distance matrices must be 1-dimensional numpy arrays. Their length must be a binomial coefficient :math: for some positive integer n. Parameters ---------- y : array_like The condensed distance matrix. warning : bool, optional Invokes a warning if the variable passed is not a valid condensed distance matrix. The warning message explains why the distance matrix is not valid. is used when referencing the offending variable. throw : bool, optional Throws an exception if the variable passed is not a valid condensed distance matrix. name : str, optional Used when referencing the offending variable in the warning or exception message. Returns ------- bool True if the input array is a valid condensed distance matrix, False otherwise. Examples -------- >>> from scipy.spatial.distance import is_valid_y This vector is a valid condensed distance matrix. The length is 6, which corresponds to `` is 6. >>> v = [1.0, 1.2, 1.0, 0.5, 1.3, 0.9] >>> is_valid_y(v) True An input vector with length, say, 7, is not a valid condensed distance matrix. >>> is_valid_y([1.1, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7]) False", - "type": "function", - "file_path": "scipy\\scipy\\spatial\\distance.py", - "ast_data": "FunctionDef name:is_valid_y arguments arg:y arg:warning arg:throw arg:name Assign Call call:_asarray Assign Try If Compare op:NotEq Raise raises:ValueError(f'Condensed distance matrix {name_str}must have shape=1 (i.e. be one-dimensional).') Assign Assign Call call:int If Compare op:NotEq Raise raises:ValueError(f'Length n of condensed distance matrix {name_str}must be a binomial coefficient, i.e. 
there must be a k such that (k \\\\choose 2)=n)!') ExceptHandler If Raise If Return return:yes Return return:yes" - }, - { - "library": "seaborn", - "name": "scale_categorical", - "source_code": "def scale_categorical(self, axis, order = None, formatter = None): _check_argument('axis', ['x', 'y'], axis) if axis not in self.variables: self.variables[axis] = None self.var_types[axis] = 'categorical' self.plot_data[axis] = '' if self.var_types[axis] = = 'numeric': self.plot_data = self.plot_data.sort_values(axis, kind = 'mergesort') cat_data = self.plot_data[axis].dropna() self._var_ordered[axis] = order is not None or cat_data.dtype.name = = 'category' order = pd.Index(categorical_order(cat_data, order), name = axis) if formatter is not None: cat_data = cat_data.map(formatter) order = order.map(formatter) else: cat_data = cat_data.astype(str) order = order.astype(str) self.var_levels[axis] = order self.var_types[axis] = 'categorical' self.plot_data[axis] = cat_data return self", - "docstring": "Enforce categorical (fixed-scale) rules for the data on given axis. Parameters ---------- axis : \"x\" or \"y\" Axis of the plot to operate on. order : list Order that unique values should appear in. formatter : callable Function mapping values to a string representation. Returns ------- self", - "type": "method", - "file_path": "seaborn\\seaborn\\_base.py", - "ast_data": "FunctionDef name:scale_categorical arguments arg:self arg:axis arg:order arg:formatter If Compare op:NotIn Assign Assign Assign If Compare op:Eq Assign Call call:sort_values Assign Call call:dropna Assign BoolOp Compare op:IsNot Compare op:Eq Assign Call call:Index If Compare op:IsNot Assign Call call:map Assign Call call:map Assign Call call:astype Assign Call call:astype Assign Assign Assign Return return:yes" - }, - { - "library": "sphinx", - "name": "fetch_inventory", - "source_code": "def fetch_inventory(app: Sphinx, uri: InventoryURI, inv: str) -> Inventory: return _fetch_inventory(target_uri = uri, inv_location = inv, config = _InvConfig.from_config(app.config), srcdir = app.srcdir).data", - "docstring": "Fetch, parse and return an intersphinx inventory file.", - "type": "function", - "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_load.py", - "ast_data": "FunctionDef name:fetch_inventory arguments arg:app type:Sphinx arg:uri type:InventoryURI arg:inv type:str Return return:yes" - }, - { - "library": "tensorflow", - "name": "expint", - "source_code": "@tf_export('math.special.expint') @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support def expint(x, name = None): with ops.name_scope(name, 'expint', [x]): return gen_special_math_ops.expint(x)", - "docstring": "Computes the Exponential integral of element-wise. The Exponential integral is defined as the integral of from to , with the domain of definition all positive real numbers. >>> tf.math.special.expint([1., 1.1, 2.1, 4.1]).numpy() array([ 1.8951179, 2.1673784, 5.3332353, 21.048464], dtype=float32) This implementation is based off of the Cephes math library. Args: x: A or . Must be one of the following types: , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . 
@compatibility(scipy) Equivalent to scipy.special.expi @end_compatibility", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", - "ast_data": "FunctionDef name:expint arguments arg:x arg:name Call call:tf_export With Return return:yes" - }, - { - "library": "matplotlib", - "name": "cmd", - "source_code": "def cmd(expr: str, args: ParserElement) -> ParserElement: def names(elt: ParserElement) -> T.Generator[str, None, None]: if isinstance(elt, ParseExpression): for expr in elt.exprs: yield from names(expr) elif elt.resultsName: yield elt.resultsName csname = expr.split('{', 1)[0] err = csname + ''.join(('{%s}' % name for name in names(args))) if expr = = csname else expr return csname - (args | Error(f'Expected {err}'))", - "docstring": "Helper to define TeX commands. `` where the names in the error message are taken from element names in *args*. If *expr* already includes arguments (e.g. \"\\cmd{arg}{...}\"), then they are stripped when constructing the parse element, but kept (and *expr* is used as is) in the error message.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\_mathtext.py", - "ast_data": "FunctionDef name:cmd arguments arg:expr type:str arg:args type:ParserElement FunctionDef name:names arguments arg:elt type:ParserElement If Call call:isinstance For If Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "laf_is_inside_image", - "source_code": "def laf_is_inside_image(laf: Tensor, images: Tensor, border: int = 0) -> Tensor: KORNIA_CHECK_LAF(laf) _, _, h, w = images.size() pts = laf_to_boundary_points(laf, 12) good_lafs_mask = (pts[..., 0] > = border) * (pts[..., 0] < = w - border) * (pts[..., 1] > = border) * (pts[..., 1] < = h - border) good_lafs_mask = good_lafs_mask.min(dim = 2)[0] return good_lafs_mask", - "docstring": "Check if the LAF is touching or partly outside the image boundary. Returns the mask of LAFs, which are fully inside the image, i.e. valid. Args: laf: :math:. images: images, lafs are detected in :math:. border: additional border. Returns: mask with shape :math:.", - "type": "function", - "file_path": "kornia\\kornia\\feature\\laf.py", - "ast_data": "FunctionDef name:laf_is_inside_image arguments arg:laf type:Tensor arg:images type:Tensor arg:border type:int Assign Call call:size Assign Call call:laf_to_boundary_points Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "zero_grad", - "source_code": "def zero_grad(self, set_to_none: bool = True): self._optim.zero_grad(set_to_none)", - "docstring": "Resets the gradients of all optimized :class: s. Args: set_to_none (bool): instead of setting to zero, set the grads to None. This will in general have lower memory footprint, and can modestly improve performance. However, it changes certain behaviors. For example: 1. When the user tries to access a gradient and perform manual ops on it, a None attribute or a Tensor full of 0s will behave differently. 2. 
If the user requests `` optimizers have a different behavior if the gradient is 0 or None (in one case it does the step with a gradient of 0 and in the other it skips the step altogether).", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_optim\\api.py", - "ast_data": "FunctionDef name:zero_grad arguments arg:self arg:set_to_none type:bool" - }, - { - "library": "pytorch", - "name": "DataProcessorTemplateWrapper", - "source_code": "class DataProcessorTemplateWrapper: def __init__(self, wrapped_template_cls, preprocessor, postprocessor, **kwargs) -> None: if preprocessor is not None: self._preprocessor = preprocessor else: self._preprocessor = lambda x, y: (x, y) if postprocessor is not None: self._postprocessor = postprocessor else: self._postprocessor = lambda x: x assert 'input_nodes' in kwargs assert 'layout' in kwargs kwargs['input_nodes'], kwargs['layout'] = preprocessor(kwargs['input_nodes'], kwargs['layout']) self._wrapped = wrapped_template_cls(**kwargs) def __getattr__(self, name): return getattr(self._wrapped, name) def maybe_append_choice(self, choices, **kwargs): return type(self._wrapped).maybe_append_choice(self, choices, **kwargs) def generate(self, **kwargs): choice_caller = self._wrapped.generate(**kwargs) return DataProcessorChoiceCallerWrapper(choice_caller, self._preprocessor, self._postprocessor) def __repr__(self) -> str: return f'DataProcessorTemplateWrapper({self._wrapped})'", - "docstring": "A wrapper class for a kernel template. This class together with provides a convenient way to preprocess and postprocess data before and after using the wrapped template. A typical usage is to reorder or filter the input nodes in order to match the expected input of other kernel choices like a ATen kernel. A more complicated usage is to prepack the weights. See the example from :mod: for more details.", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\select_algorithm.py", - "ast_data": "ClassDef name:DataProcessorTemplateWrapper FunctionDef name:__init__ arguments arg:self arg:wrapped_template_cls arg:preprocessor arg:postprocessor kwarg:kwargs If Compare op:IsNot Assign Assign If Compare op:IsNot Assign Assign Assign Call call:preprocessor Assign Call call:wrapped_template_cls FunctionDef name:__getattr__ arguments arg:self arg:name Return return:yes FunctionDef name:maybe_append_choice arguments arg:self arg:choices kwarg:kwargs Return return:yes FunctionDef name:generate arguments arg:self kwarg:kwargs Assign Call call:generate Return return:yes FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "constant_", - "source_code": "@_sharded_op_impl(torch.nn.init.constant_) def constant_(types, args = (), kwargs = None, pg = None): validate_param(kwargs, 'kwargs') sharded_tensor = kwargs['tensor'] validate_param(sharded_tensor, 'tensor') val = kwargs['val'] validate_param(val, 'val') for shard in sharded_tensor.local_shards(): torch.nn.init.constant_(shard.tensor, val = val) return sharded_tensor", - "docstring": "Fills the input ShardedTensor with the value \\text{val}val. 
Args: tensor: tensor sharded across devices val: the value to fill the tensor with", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\_ops\\init.py", - "ast_data": "FunctionDef name:constant_ arguments arg:types arg:args arg:kwargs arg:pg Call call:_sharded_op_impl Assign Assign For Call call:local_shards Return return:yes" - }, - { - "library": "pytorch", - "name": "record_untuned_is_enabled", - "source_code": "def record_untuned_is_enabled() -> bool: return torch._C._cuda_record_untuned_is_enabled()", - "docstring": "Returns whether TunableOp operations are recorded for offline tuning.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\tunable.py", - "ast_data": "FunctionDef name:record_untuned_is_enabled arguments Return return:yes" - }, - { - "library": "tensorflow", - "name": "tensor_list_scatter", - "source_code": "def tensor_list_scatter(tensor, indices, element_shape = None, input_handle = None, name = None): tensor = ops.convert_to_tensor(tensor) if input_handle is not None: output_handle = gen_list_ops.tensor_list_scatter_into_existing_list(input_handle = input_handle, tensor = tensor, indices = indices, name = name) handle_data_util.copy_handle_data(input_handle, output_handle) return output_handle else: output_handle = gen_list_ops.tensor_list_scatter_v2(tensor = tensor, indices = indices, element_shape = _build_element_shape(element_shape), num_elements = -1, name = name) _set_handle_data(output_handle, element_shape, tensor.dtype) return output_handle", - "docstring": "Returns a TensorList created or updated by scattering .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\list_ops.py", - "ast_data": "FunctionDef name:tensor_list_scatter arguments arg:tensor arg:indices arg:element_shape arg:input_handle arg:name Assign Call call:convert_to_tensor If Compare op:IsNot Assign Call call:tensor_list_scatter_into_existing_list Return return:yes Assign Call call:tensor_list_scatter_v2 Return return:yes" - }, - { - "library": "pandas", - "name": "get_objs_combined_axis", - "source_code": "def get_objs_combined_axis(objs, intersect: bool = False, axis: Axis = 0, sort: bool = True) -> Index: obs_idxes = [obj._get_axis(axis) for obj in objs] return _get_combined_index(obs_idxes, intersect = intersect, sort = sort)", - "docstring": "Extract combined index: return intersection or union (depending on the value of \"intersect\") of indexes on given axis, or None if all objects lack indexes (e.g. they are numpy arrays). Parameters ---------- objs : list Series or DataFrame objects, may be mix of the two. intersect : bool, default False If True, calculate the intersection between indexes. Otherwise, calculate the union. axis : {0 or 'index', 1 or 'outer'}, default 0 The axis to extract indexes from. sort : bool, default True Whether the result index should come out sorted or not. Returns ------- Index", - "type": "function", - "file_path": "pandas\\pandas\\core\\indexes\\api.py", - "ast_data": "FunctionDef name:get_objs_combined_axis arguments arg:objs arg:intersect type:bool arg:axis type:Axis arg:sort type:bool Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "output_classes", - "source_code": "@property def output_classes(self): return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_classes(), self._element_spec)", - "docstring": "Returns the class of each component of an element of this iterator. The expected values are and . 
Returns: A nested structure of Python objects corresponding to each component of an element of this dataset.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", - "ast_data": "FunctionDef name:output_classes arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "pdf", - "source_code": "def pdf(self, X, mean = None, rowcov = 1, colcov = 1): return np.exp(self.logpdf(X, mean, rowcov, colcov))", - "docstring": "Matrix normal probability density function. Parameters ---------- X : array_like Quantiles, with the last two axes of denoting the components. %(_matnorm_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at Notes ----- %(_matnorm_doc_callparams_note)s", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:pdf arguments arg:self arg:X arg:mean arg:rowcov arg:colcov Return return:yes" - }, - { - "library": "tensorflow", - "name": "reshard", - "source_code": "def reshard(self, checkpoint_values: tensor.Tensor, shape_and_slice: str) -> tensor.Tensor: return _shard_from_cpu_to_sc(checkpoint_values, shape_and_slice, self._to_shard_layout)", - "docstring": "Reshards the checkpoint values according to the resharding plan. Args: checkpoint_values: The checkpoint values to be resharded. shape_and_slice: The shape and slice spec to be returned after resharding. Returns: The resharded tensor slice.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py", - "ast_data": "FunctionDef name:reshard arguments arg:self arg:checkpoint_values type:tensor.Tensor arg:shape_and_slice type:str Return return:yes" - }, - { - "library": "pytorch", - "name": "setdefault", - "source_code": "def setdefault(self, key: str, default: Optional[Any] = None) -> Any: if key not in self: self[key] = default return self[key]", - "docstring": "Set the default for a key in the Parameterdict. If key is in the ParameterDict, return its value. If not, insert with a parameter and return . defaults to . Args: key (str): key to set default for default (Any): the parameter set to the key", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\container.py", - "ast_data": "FunctionDef name:setdefault arguments arg:self arg:key type:str arg:default type:Optional[Any] If Compare op:NotIn Assign Return return:yes" - }, - { - "library": "mongo", - "name": "get_key_by_alt_name", - "source_code": "def get_key_by_alt_name(self, key_alt_name: str) -> Optional[RawBSONDocument]: self._check_closed() assert self._key_vault_coll is not None return self._key_vault_coll.find_one({'keyAltNames': key_alt_name})", - "docstring": "Get a key document in the key vault collection that has the given ``. :param key_alt_name: (str): The key alternate name of the key to get. :return: The key document. .. 
versionadded:: 4.2", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\encryption.py", - "ast_data": "FunctionDef name:get_key_by_alt_name arguments arg:self arg:key_alt_name type:str Return return:yes" - }, - { - "library": "feincms", - "name": "ItemEditorForm", - "source_code": "class ItemEditorForm(forms.ModelForm): region = forms.CharField(widget = forms.HiddenInput()) ordering = forms.IntegerField(widget = forms.HiddenInput())", - "docstring": "The item editor form contains hidden region and ordering fields and should be used for all content type inlines.", - "type": "class", - "file_path": "feincms\\feincms\\admin\\item_editor.py", - "ast_data": "ClassDef name:ItemEditorForm Assign Call call:CharField Assign Call call:IntegerField" - }, - { - "library": "pytorch", - "name": "set_dtype_configs", - "source_code": "def set_dtype_configs(self, dtype_configs: list[DTypeConfig]) -> BackendPatternConfig: self.dtype_configs = dtype_configs return self", - "docstring": "Set the supported data types passed as arguments to quantize ops in the reference model spec, overriding all previously registered data types.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", - "ast_data": "FunctionDef name:set_dtype_configs arguments arg:self arg:dtype_configs type:list[DTypeConfig] Assign Return return:yes" - }, - { - "library": "pandas", - "name": "take", - "source_code": "def take(self, indices, *, allow_fill: bool = False, fill_value = None, axis = None, **kwargs) -> Self: nv.validate_take((), kwargs) fill_left = fill_right = fill_value if allow_fill: fill_left, fill_right = self._validate_scalar(fill_value) left_take = take(self._left, indices, allow_fill = allow_fill, fill_value = fill_left) right_take = take(self._right, indices, allow_fill = allow_fill, fill_value = fill_right) return self._shallow_copy(left_take, right_take)", - "docstring": "Take elements from the IntervalArray. Parameters ---------- indices : sequence of integers Indices to be taken. allow_fill : bool, default False How to handle negative values in . * False: negative values in indicate positional indices from the right (the default). This is similar to :func:. * True: negative values in indicate missing values. These values are set to . Any other other negative values raise a `allow_fillfill_valuefill_valueindicesallow_fill` is True.", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\interval.py", - "ast_data": "FunctionDef name:take arguments arg:self arg:indices kwarg:kwargs Assign If Assign Call call:_validate_scalar Assign Call call:take Assign Call call:take Return return:yes" - }, - { - "library": "pytorch", - "name": "get_subexpr_involving_symbol", - "source_code": "@classmethod def get_subexpr_involving_symbol(cls, expr: Expr, symbol: Symbol) -> Expr: expr = cls._preprocess(expr) return sympy.S.Zero + sum((term for term in sympy.Add.make_args(expr) if symbol in term.free_symbols))", - "docstring": "Given a sympy expression, return the subexpression comprised only of terms involving the specified symbol. 
For example, if is , and is , this returns .", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\block_analysis.py", - "ast_data": "FunctionDef name:get_subexpr_involving_symbol arguments arg:cls arg:expr type:Expr arg:symbol type:Symbol Assign Call call:_preprocess Return return:yes" - }, - { - "library": "django", - "name": "SQLiteCursorWrapper", - "source_code": "class SQLiteCursorWrapper(Database.Cursor): def execute(self, query, params = None): if params is None: return super().execute(query) param_names = list(params) if isinstance(params, Mapping) else None query = self.convert_query(query, param_names = param_names) return super().execute(query, params) def executemany(self, query, param_list): peekable, param_list = tee(iter(param_list)) if (params: = next(peekable, None)) and isinstance(params, Mapping): param_names = list(params) else: param_names = None query = self.convert_query(query, param_names = param_names) return super().executemany(query, param_list) def convert_query(self, query, *, param_names = None): if param_names is None: return FORMAT_QMARK_REGEX.sub('?', query).replace('%%', '%') else: return query % {name: f': {name}' for name in param_names}", - "docstring": "Django uses the \"format\" and \"pyformat\" styles, but Python's sqlite3 module supports neither of these styles. This wrapper performs the following conversions: - \"format\" style to \"qmark\" style - \"pyformat\" style to \"named\" style In both cases, if you want to use a literal \"%s\", you'll need to use \"%%s\".", - "type": "class", - "file_path": "django\\django\\db\\backends\\sqlite3\\base.py", - "ast_data": "ClassDef name:SQLiteCursorWrapper FunctionDef name:execute arguments arg:self arg:query arg:params If Compare op:Is Return return:yes Assign Assign Call call:convert_query Return return:yes FunctionDef name:executemany arguments arg:self arg:query arg:param_list Assign Call call:tee If BoolOp Call call:isinstance Assign Call call:list Assign Assign Call call:convert_query Return return:yes FunctionDef name:convert_query arguments arg:self arg:query If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_or_create_variables_dir", - "source_code": "def get_or_create_variables_dir(export_dir): variables_dir = get_variables_dir(export_dir) file_io.recursive_create_dir(variables_dir) return variables_dir", - "docstring": "Return variables sub-directory, or create one if it doesn't exist.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py", - "ast_data": "FunctionDef name:get_or_create_variables_dir arguments arg:export_dir Assign Call call:get_variables_dir Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_y", - "source_code": "def set_y(self, y): self._y = y self.stale = True", - "docstring": "Set the *y* position of the text. 
Parameters ---------- y : float", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:set_y arguments arg:self arg:y Assign Assign" - }, - { - "library": "matplotlib", - "name": "get_aux_axes", - "source_code": "def get_aux_axes(self, tr = None, viewlim_mode = 'equal', axes_class = None, **kwargs): if axes_class is None: axes_class = self._base_axes_class parasite_axes_class = parasite_axes_class_factory(axes_class) ax2 = parasite_axes_class(self, tr, viewlim_mode = viewlim_mode, **kwargs) self.parasites.append(ax2) ax2._remove_method = self.parasites.remove return ax2", - "docstring": "Add a parasite axes to this host. Despite this method's name, this should actually be thought of as an `~matplotlib.transforms.Transform.Transform~matplotlib.axes.Axes~.axes.Axes` subclass that is instantiated. If None, the base class of the host axes is used. **kwargs Other parameters are forwarded to the parasite axes constructor.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py", - "ast_data": "FunctionDef name:get_aux_axes arguments arg:self arg:tr arg:viewlim_mode arg:axes_class kwarg:kwargs If Compare op:Is Assign Assign Call call:parasite_axes_class_factory Assign Call call:parasite_axes_class Assign Return return:yes" - }, - { - "library": "numpy", - "name": "upgrade", - "source_code": "def upgrade(self, value): self._checked = True try: return self._strict_call(value) except ValueError: self._do_upgrade() return self.upgrade(value)", - "docstring": "Find the best converter for a given string, and return the result. The supplied string is converted by testing different converters in order. First the method of the instance is tried, if this fails other available converters are tried. The order in which these other converters are tried is determined by the attribute of the instance. Parameters ---------- value : str The string to convert. Returns ------- out : any The result of converting with the appropriate converter.", - "type": "method", - "file_path": "numpy\\numpy\\lib\\_iotools.py", - "ast_data": "FunctionDef name:upgrade arguments arg:self arg:value Assign Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "add_metadata_json", - "source_code": "def add_metadata_json(self, key: str, value: str): torch.autograd._add_metadata_json(key, value)", - "docstring": "Adds a user defined metadata with a string key and a valid json value into the trace file", - "type": "method", - "file_path": "pytorch\\torch\\profiler\\profiler.py", - "ast_data": "FunctionDef name:add_metadata_json arguments arg:self arg:key type:str arg:value type:str" - }, - { - "library": "pytorch", - "name": "type", - "source_code": "@staticmethod def type() -> str: raise RuntimeError('CacheArtifact is an abstract class, please use a subclass')", - "docstring": "Returns the type of the artifact. Must be unique across all CacheArtifact classes. 
CacheArtifactFactory.register will add property method to CacheInfo based on this (def {type}_artifacts) that returns all artifacts for specific cache.", - "type": "method", - "file_path": "pytorch\\torch\\compiler\\_cache.py", - "ast_data": "FunctionDef name:type arguments Raise raises:RuntimeError('CacheArtifact is an abstract class, please use a subclass')" - }, - { - "library": "tensorflow", - "name": "__nonzero__", - "source_code": "def __nonzero__(self): self._disallow_bool_casting()", - "docstring": "Dummy method to prevent a tensor from being used as a Python . This is the Python 2.x counterpart to above. Raises: .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", - "ast_data": "FunctionDef name:__nonzero__ arguments arg:self" - }, - { - "library": "django", - "name": "listdir", - "source_code": "def listdir(self, path): raise NotImplementedError('subclasses of Storage must provide a listdir() method')", - "docstring": "List the contents of the specified path. Return a 2-tuple of lists: the first item being directories, the second item being files.", - "type": "method", - "file_path": "django\\django\\core\\files\\storage\\base.py", - "ast_data": "FunctionDef name:listdir arguments arg:self arg:path Raise raises:NotImplementedError('subclasses of Storage must provide a listdir() method')" - }, - { - "library": "tensorflow", - "name": "with_accounted_types", - "source_code": "def with_accounted_types(self, account_type_regexes): self._options['account_type_regexes'] = copy.copy(account_type_regexes) return self", - "docstring": "Selectively counting statistics based on node types. Here, 'types' means the profiler nodes' properties. Profiler by default consider device name (e.g. /job:xx/.../device:GPU:0) and operation type (e.g. MatMul) as profiler nodes' properties. User can also associate customized 'types' to profiler nodes through OpLogProto proto. For example, user can select profiler nodes placed on gpu:0 with: If none of a node's properties match the specified regexes, the node is not displayed nor accounted. Args: account_type_regexes: A list of regexes specifying the types. 
Returns: self.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\profiler\\option_builder.py", - "ast_data": "FunctionDef name:with_accounted_types arguments arg:self arg:account_type_regexes Assign Call call:copy Return return:yes" - }, - { - "library": "tensorflow", - "name": "experimental_from_proto", - "source_code": "@classmethod def experimental_from_proto(cls, proto: types_pb2.SerializedDType) -> 'DType': return DType(proto.datatype)", - "docstring": "Returns a Dtype instance based on the serialized proto.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", - "ast_data": "FunctionDef name:experimental_from_proto arguments arg:cls arg:proto type:types_pb2.SerializedDType Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_pull_request", - "source_code": "def get_pull_request(project, num, auth = False): url = f'https: //api.github.com/repos/{project}/pulls/{num}' if auth: header = make_auth_header() else: header = None print('fetching %s' % url, file = sys.stderr) response = requests.get(url, headers = header) response.raise_for_status() return json.loads(response.text, object_hook = Obj)", - "docstring": "Return the pull request info for a given PR number.", - "type": "function", - "file_path": "matplotlib\\tools\\gh_api.py", - "ast_data": "FunctionDef name:get_pull_request arguments arg:project arg:num arg:auth Assign If Assign Call call:make_auth_header Assign Assign Call call:get Return return:yes" - }, - { - "library": "pytorch", - "name": "check_export_model_diff", - "source_code": "def check_export_model_diff(model: torch.nn.Module | torch.jit.ScriptModule, test_input_groups: Sequence[tuple[tuple[Any, ...], Mapping[str, Any]]], export_options: _experimental.ExportOptions | None = None) -> str: export_options = _experimental.ExportOptions() if export_options is None else export_options jit_diff_report = _check_graph_diff(model, test_input_groups, export_options, _traced_graph_from_model) if jit_diff_report: return jit_diff_report return _check_graph_diff(model, test_input_groups, export_options, _onnx_graph_from_model)", - "docstring": "Verify exported model discrepancy between different groups of inputs. A graph is exported for each group of inputs. The exported graphs are then compared to each other, and discrepancies of first pair of nodes are reported. This function first checks the jit graph. If no discrepancies were found, it then checks the onnx graph. Unless otherwise specified, the jit/ONNX graph is expected to be the same, regardless of the inputs used for exporting. A discrepancy implies the graph exported is not accurate when run on other groups of inputs, which will typically results in runtime errors or mismatching output. Args: model (torch.nn.Module or torch.jit.ScriptModule): The model to be exported. test_input_groups (Sequence[Tuple[Tuple[Any, ...], Mapping[str, Any]]]): A sequence of input groups to be used to export the model. Each input group is a pair of (args, kwargs). export_options (_experimental.ExportOptions, optional): An _experimental.ExportOptions object that controls the export behavior. 
Returns: str: A string containing the diff of the exported models.", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\verification.py", - "ast_data": "FunctionDef name:check_export_model_diff arguments arg:model type:torch.nn.Module | torch.jit.ScriptModule arg:test_input_groups type:Sequence[tuple[tuple[Any, ...], Mapping[str, Any]]] arg:export_options type:_experimental.ExportOptions | None Assign Assign Call call:_check_graph_diff If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "as_dict", - "source_code": "def as_dict(self): ret = {} for job in self.jobs: task_indices = self.task_indices(job) if len(task_indices) = = 0: ret[job] = {} continue if max(task_indices) + 1 = = len(task_indices): ret[job] = self.job_tasks(job) else: ret[job] = {i: self.task_address(job, i) for i in task_indices} return ret", - "docstring": "Returns a dictionary from job names to their tasks. For each job, if the task index space is dense, the corresponding value will be a list of network addresses; otherwise it will be a dictionary mapping (sparse) task indices to the corresponding addresses. Returns: A dictionary mapping job names to lists or dictionaries describing the tasks in those jobs.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py", - "ast_data": "FunctionDef name:as_dict arguments arg:self Assign For Assign Call call:task_indices If Compare op:Eq Assign If Compare op:Eq Assign Call call:job_tasks Assign Return return:yes" - }, - { - "library": "django", - "name": "savepoint", - "source_code": "@async_unsafe def savepoint(self): if not self._savepoint_allowed(): return thread_ident = _thread.get_ident() tid = str(thread_ident).replace('-', '') self.savepoint_state + = 1 sid = 's%s_x%d' % (tid, self.savepoint_state) self.validate_thread_sharing() self._savepoint(sid) return sid", - "docstring": "Create a savepoint inside the current transaction. Return an identifier for the savepoint that will be used for the subsequent rollback or commit. Do nothing if savepoints are not supported.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:savepoint arguments arg:self If Return return:no Assign Call call:get_ident Assign Call call:replace Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "process_signature", - "source_code": "def process_signature(line: str) -> list[str]: tokens: list[str] = split_outside_bracket(line) for i, token in enumerate(tokens): tokens[i] = token.strip(' ') if token = = 'cls': tokens[i] = 'self' elif i > 0 and 'self' = = tokens[i - 1] and (tokens[i][0] ! = '*'): tokens[i] = '' elif 'Callable = ' in token: head = token.rpartition(' = ')[0] tokens[i] = head.strip(' ') + ' = ...' tokens = [t for t in tokens if t ! = ''] return tokens", - "docstring": "Clean up a given raw function signature. 
This includes removing the self-referential datapipe argument, default arguments of input functions, newlines, and spaces.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py", - "ast_data": "FunctionDef name:process_signature arguments arg:line type:str For Call call:enumerate Assign Call call:strip If Compare op:Eq Assign If BoolOp Compare op:Gt Compare op:Eq Compare op:NotEq Assign If Compare op:In Assign Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_dpi", - "source_code": "def set_dpi(self, val): self._parent.dpi = val self.stale = True", - "docstring": "Set the resolution of parent figure in dots-per-inch. Parameters ---------- val : float", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:set_dpi arguments arg:self arg:val Assign Assign" - }, - { - "library": "algorithms", - "name": "min_distance_dp", - "source_code": "def min_distance_dp(word1, word2): length1, length2 = (len(word1) + 1, len(word2) + 1) res = [[0 for _ in range(length2)] for _ in range(length1)] if length1 = = length2: for i in range(1, length1): res[i][0], res[0][i] = (i, i) else: for i in range(length1): res[i][0] = i for i in range(length2): res[0][i] = i for i in range(1, length1): for j in range(1, length2): if word1[i - 1] = = word2[j - 1]: res[i][j] = res[i - 1][j - 1] else: res[i][j] = min(res[i - 1][j], res[i][j - 1]) + 1 return res[len(word1)][len(word2)]", - "docstring": "Finds minimum distance in a dynamic programming manner TC: O(length1*length2), SC: O(length1*length2) :type word1: str :type word2: str :rtype: int", - "type": "function", - "file_path": "algorithms\\algorithms\\strings\\min_distance.py", - "ast_data": "FunctionDef name:min_distance_dp arguments arg:word1 arg:word2 Assign Assign If Compare op:Eq For Call call:range Assign For Call call:range Assign For Call call:range Assign For Call call:range For Call call:range If Compare op:Eq Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "run_graph", - "source_code": "def run_graph(self, device, n, m, k, transpose_a, transpose_b, num_iters, dtype): graph = ops.Graph() with graph.as_default(): output = build_graph(device, n, m, k, transpose_a, transpose_b, dtype) with session_lib.Session(graph = graph) as session: variables.global_variables_initializer().run() for _ in range(500): session.run(output) start_time = time.time() for _ in range(num_iters): session.run(output) duration = time.time() - start_time num_items = n * m * k * 2 throughput = num_items * num_iters / duration / 1000000000.0 print('%s %s input_info: %s %d %.4fsec, %.4fGitems/s.' % (device, str(dtype), str(n) + 'x' + str(m) + 'x' + str(k) + ', ta: ' + str(transpose_a) + '.tb: ' + str(transpose_b), num_iters, duration, throughput)) name_template = 'matmul_{device}_{dtype}_input_info_{inputinfo}' self.report_benchmark(name = name_template.format(device = device, dtype = str(dtype).replace(' ', ''), inputinfo = str(n) + 'x' + str(m) + 'x' + str(k) + ', ta: ' + str(transpose_a) + ', tb: ' + str(transpose_b)).replace(' ', ''), iters = num_iters, wall_time = duration) return duration", - "docstring": "Run the graph and print its execution time. Args: device: String, the device to run on. n: tensor A's first dimension size. m: tensor A's second dimension size. k: tensor B's second dimension size. transpose_a: boolean value to show if tensor A is transposed. transpose_b: boolean value to show if tensor B is transposed. 
num_iters: number of iterations to run the benchmark. dtype: numpy data type of the input tensor. Returns: The duration of the run in seconds.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\matmul_benchmark.py", - "ast_data": "FunctionDef name:run_graph arguments arg:self arg:device arg:n arg:m arg:k arg:transpose_a arg:transpose_b arg:num_iters arg:dtype Assign Call call:Graph With Assign Call call:build_graph With For Call call:range Assign Call call:time For Call call:range Assign Assign Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "__init__", - "source_code": "def __init__(self, feat, layer): if not feat: raise GDALException('Cannot create OGR Feature, invalid pointer given.') self.ptr = feat self._layer = layer", - "docstring": "Initialize Feature from a pointer and its Layer object.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:feat arg:layer If Raise raises:GDALException('Cannot create OGR Feature, invalid pointer given.') Assign Assign" - }, - { - "library": "pytorch", - "name": "patch_torch_manual_seed", - "source_code": "@functools.lru_cache(None) def patch_torch_manual_seed(): def deterministic_torch_manual_seed(*args, **kwargs): from torch._C import default_generator seed = 1337 if HAS_CUDA: import torch.cuda if not torch.cuda._is_in_bad_fork(): torch.cuda.manual_seed_all(seed) if HAS_XPU: import torch.xpu if not torch.xpu._is_in_bad_fork(): torch.xpu.manual_seed_all(seed) return default_generator.manual_seed(seed) torch.manual_seed = deterministic_torch_manual_seed", - "docstring": "Make torch manual seed deterministic. Helps with accuracy testing.", - "type": "function", - "file_path": "pytorch\\benchmarks\\dynamo\\common.py", - "ast_data": "FunctionDef name:patch_torch_manual_seed arguments Call call:lru_cache FunctionDef name:deterministic_torch_manual_seed arguments vararg:args kwarg:kwargs Assign If If If If Return return:yes Assign" - }, - { - "library": "sphinx", - "name": "NoUri", - "source_code": "class NoUri(Exception): pass", - "docstring": "Raised by builder.get_relative_uri() or from missing-reference handlers if there is no URI available.", - "type": "class", - "file_path": "sphinx\\sphinx\\errors.py", - "ast_data": "ClassDef name:NoUri" - }, - { - "library": "django", - "name": "parse_file_upload", - "source_code": "def parse_file_upload(self, META, post_data): self.upload_handlers = ImmutableList(self.upload_handlers, warning = 'You cannot alter upload handlers after the upload has been processed.') parser = MultiPartParser(META, post_data, self.upload_handlers, self.encoding) return parser.parse()", - "docstring": "Return a tuple of (POST QueryDict, FILES MultiValueDict).", - "type": "method", - "file_path": "django\\django\\http\\request.py", - "ast_data": "FunctionDef name:parse_file_upload arguments arg:self arg:META arg:post_data Assign Call call:ImmutableList Assign Call call:MultiPartParser Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, cell): self._cell = cell", - "docstring": "Creates a new StringGaugeCell. 
Args: cell: A c pointer of TFE_MonitoringStringGaugeCell.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:cell Assign" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "def fit(self, X, y = None): self.fit_predict(X, y) return self", - "docstring": "Estimate model parameters with the EM algorithm. The method fits the model `` is ignored and a single initialization is performed upon the first call. Upon consecutive calls, training starts where it left off. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. Each row corresponds to a single data point. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object The fitted mixture.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Return return:yes" - }, - { - "library": "sphinx", - "name": "convert", - "source_code": "def convert(self, _from: str | os.PathLike[str], _to: str | os.PathLike[str]) -> bool: raise NotImplementedError", - "docstring": "Convert an image file to the expected format. *_from* is a path of the source image file, and *_to* is a path of the destination file.", - "type": "method", - "file_path": "sphinx\\sphinx\\transforms\\post_transforms\\images.py", - "ast_data": "FunctionDef name:convert arguments arg:self arg:_from type:str | os.PathLike[str] arg:_to type:str | os.PathLike[str] Raise raises:NotImplementedError" - }, - { - "library": "mongo", - "name": "close", - "source_code": "async def close(self) -> None: if self._publish: assert self._listener is not None assert self._events is not None self._events.put((self._listener.publish_server_closed, (self._description.address, self._topology_id))) if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log(_SDAM_LOGGER, message = _SDAMStatusMessage.STOP_SERVER, topologyId = self._topology_id, serverHost = self._description.address[0], serverPort = self._description.address[1]) await self._monitor.close() await self._pool.close()", - "docstring": "Clear the connection pool and stop the monitor. Reconnect with open().", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\server.py", - "ast_data": "AsyncFunctionDef name:close arguments arg:self If If Call call:isEnabledFor" - }, - { - "library": "scikit-learn", - "name": "apply", - "source_code": "def apply(self, X, check_input = True): check_is_fitted(self) X = self._validate_X_predict(X, check_input) return self.tree_.apply(X)", - "docstring": "Return the index of the leaf that each sample is predicted as. .. versionadded:: 0.17 Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. 
Internally, it will be converted to ``, possibly with gaps in the numbering.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", - "ast_data": "FunctionDef name:apply arguments arg:self arg:X arg:check_input Assign Call call:_validate_X_predict Return return:yes" - }, - { - "library": "scipy", - "name": "Problem07", - "source_code": "class Problem07(Benchmark): def __init__(self, dimensions = 1): Benchmark.__init__(self, dimensions) self._bounds = [(2.7, 7.5)] self.global_optimum = 5.19978 self.fglob = -1.6013 def fun(self, x, *args): self.nfev + = 1 x = x[0] return sin(x) + sin(10.0 / 3.0 * x) + log(x) - 0.84 * x + 3", - "docstring": "Univariate Problem07 objective function. This class defines the Univariate Problem07 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem07}}(x) = \\sin(x) + \\sin \\left(\\frac{10}{3}x \\right) + \\log(x) - 0.84x + 3 Bound constraints: :math: .. figure:: figures/Problem07.png :alt: Univariate Problem07 function :align: center **Univariate Problem07 function** *Global optimum*: :math: for :math:", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", - "ast_data": "ClassDef name:Problem07 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Return return:yes" - }, - { - "library": "algorithms", - "name": "maximum", - "source_code": "def maximum(self, node): temp_node = node while temp_node.right is not None: temp_node = temp_node.right return temp_node", - "docstring": "find the max node when node regard as a root node :param node: :return: max node", - "type": "method", - "file_path": "algorithms\\algorithms\\tree\\red_black_tree\\red_black_tree.py", - "ast_data": "FunctionDef name:maximum arguments arg:self arg:node Assign While Compare op:IsNot Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "flag", - "source_code": "def flag() -> None: set_cmap('flag')", - "docstring": "Set the colormap to 'flag'. This changes the default colormap as well as the colormap of the current image if there is one. See `` for more information.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", - "ast_data": "FunctionDef name:flag arguments" - }, - { - "library": "pytorch", - "name": "get_num_sms", - "source_code": "def get_num_sms() -> int: carveout = torch._C._get_sm_carveout_experimental() return get_max_num_sms() - (carveout if carveout is not None else 0)", - "docstring": "Handle experimental carveout if set otherwise return hardware SM count", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\utils.py", - "ast_data": "FunctionDef name:get_num_sms arguments Assign Call call:_get_sm_carveout_experimental Return return:yes" - }, - { - "library": "scipy", - "name": "kulczynski1", - "source_code": "@_deprecated_kulczynski1 def kulczynski1(u, v, *, w = None): u = _validate_vector(u) v = _validate_vector(v) if w is not None: w = _validate_weights(w) _, nft, ntf, ntt = _nbool_correspond_all(u, v, w = w) return ntt / (ntf + nft)", - "docstring": "Compute the Kulczynski 1 dissimilarity between two boolean 1-D arrays. .. deprecated:: 1.15.0 This function is deprecated and will be removed in SciPy 1.17.0. Replace usage of `uvc_{ij}\\mathtt{u[k]} = i\\mathtt{v[k]} = jk \\in {0, 1, ..., n-1}uvuv`. Notes ----- This measure has a minimum value of 0 and no upper limit. 
It is un-defined when there are no non-matches. .. versionadded:: 1.8.0 References ---------- .. [1] Kulczynski S. et al. Bulletin International de l'Academie Polonaise des Sciences et des Lettres, Classe des Sciences Mathematiques et Naturelles, Serie B (Sciences Naturelles). 1927; Supplement II: 57-203. Examples -------- >>> from scipy.spatial import distance >>> distance.kulczynski1([1, 0, 0], [0, 1, 0]) 0.0 >>> distance.kulczynski1([True, False, False], [True, True, False]) 1.0 >>> distance.kulczynski1([True, False, False], [True]) 0.5 >>> distance.kulczynski1([1, 0, 0], [3, 1, 0]) -3.0", - "type": "function", - "file_path": "scipy\\scipy\\spatial\\distance.py", - "ast_data": "FunctionDef name:kulczynski1 arguments arg:u arg:v Assign Call call:_validate_vector Assign Call call:_validate_vector If Compare op:IsNot Assign Call call:_validate_weights Assign Call call:_nbool_correspond_all Return return:yes" - }, - { - "library": "scrapy", - "name": "binary_is_text", - "source_code": "def binary_is_text(data: bytes) -> bool: if not isinstance(data, bytes): raise TypeError(f\"data must be bytes, got '{type(data).__name__}'\") return all((c not in _BINARYCHARS for c in data))", - "docstring": "Returns `` object) does not contain unprintable control characters.", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\python.py", - "ast_data": "FunctionDef name:binary_is_text arguments arg:data type:bytes If Raise raises:TypeError(f\"data must be bytes, got '{type(data).__name__}'\") Return return:yes" - }, - { - "library": "tensorflow", - "name": "create_dummy_tensor", - "source_code": "def create_dummy_tensor(spec): if hasattr(spec, '_create_empty_value'): return spec._create_empty_value() if isinstance(spec, ragged_tensor.RaggedTensorSpec): feature_shape = spec._shape[: 1].concatenate(spec._shape[1 + spec._ragged_rank:]) feature_type = spec._dtype else: feature_shape = spec.shape feature_type = spec.dtype dims = [dim if dim is not None else 0 for dim in feature_shape.as_list()] if feature_shape else [] if dims and (isinstance(spec, ragged_tensor.RaggedTensorSpec) or feature_shape.is_fully_defined()): dims[0] = tensor_shape.Dimension(0) if isinstance(spec, sparse_tensor.SparseTensorSpec): return sparse_tensor.SparseTensor(values = array_ops.zeros(0, feature_type), indices = array_ops.zeros((0, len(dims)), dtypes.int64), dense_shape = dims) dummy_tensor = array_ops.zeros(tensor_shape.TensorShape(dims), feature_type) if isinstance(spec, ragged_tensor.RaggedTensorSpec): row_splits = array_ops.zeros(1, spec._row_splits_dtype) dummy_tensor = ragged_tensor.RaggedTensor.from_nested_row_splits(dummy_tensor, (row_splits,) * spec._ragged_rank, validate = False) return dummy_tensor", - "docstring": "Create a dummy tensor with possible batch dimensions set to 0.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\input_lib.py", - "ast_data": "FunctionDef name:create_dummy_tensor arguments arg:spec If Call call:hasattr Return return:yes If Call call:isinstance Assign Call call:concatenate Assign Assign Assign Assign If BoolOp BoolOp Call call:isinstance Call call:is_fully_defined Assign Call call:Dimension If Call call:isinstance Return return:yes Assign Call call:zeros If Call call:isinstance Assign Call call:zeros Assign Call call:from_nested_row_splits Return return:yes" - }, - { - "library": "numpy", - "name": "expandtabs", - "source_code": "def expandtabs(self, tabsize = 8): return asarray(expandtabs(self, tabsize))", - "docstring": "Return a copy of each string 
element where all tab characters are replaced by one or more spaces. See Also -------- char.expandtabs", - "type": "method", - "file_path": "numpy\\numpy\\_core\\defchararray.py", - "ast_data": "FunctionDef name:expandtabs arguments arg:self arg:tabsize Return return:yes" - }, - { - "library": "tensorflow", - "name": "with_dtype", - "source_code": "def with_dtype(self, dtype): dtype = dtypes.as_dtype(dtype) if dtype not in (dtypes.int32, dtypes.int64): raise ValueError('dtype must be int32 or int64') if self.dtype = = dtype: return self return RowPartition(row_splits = _cast_if_not_none(self._row_splits, dtype), row_lengths = _cast_if_not_none(self._row_lengths, dtype), value_rowids = _cast_if_not_none(self._value_rowids, dtype), nrows = _cast_if_not_none(self._nrows, dtype), uniform_row_length = _cast_if_not_none(self._uniform_row_length, dtype), internal = _row_partition_factory_key)", - "docstring": "Returns a copy of this RowPartition with the given encoding dtype. Args: dtype: The dtype for encoding tensors, such as and . One of or . Returns: A copy of this RowPartition, with the encoding tensors cast to the given type.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", - "ast_data": "FunctionDef name:with_dtype arguments arg:self arg:dtype Assign Call call:as_dtype If Compare op:NotIn Raise raises:ValueError('dtype must be int32 or int64') If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "report_proto_path", - "source_code": "def report_proto_path(self, trace_dir, summary_tag_name): filename = _TT_REPORT_PROTO + '.' + summary_tag_name.replace('/', '_') return os.path.join(trace_dir, filename)", - "docstring": "Returns the path where report proto should be written. Args: trace_dir: String denoting the trace directory. summary_tag_name: Name of the unique tag that relates to the report. Returns: A string denoting the path to the report proto.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_report.py", - "ast_data": "FunctionDef name:report_proto_path arguments arg:self arg:trace_dir arg:summary_tag_name Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "incomplete_size", - "source_code": "def incomplete_size(self, name = None): if name is None: name = '%s_incomplete_size' % self._name return self._incomplete_size_fn(shared_name = self._name, name = name, dtypes = self._dtypes, capacity = self._capacity, memory_limit = self._memory_limit)", - "docstring": "Returns the number of incomplete elements in the staging area. Args: name: A name for the operation (optional) Returns: The created op", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", - "ast_data": "FunctionDef name:incomplete_size arguments arg:self arg:name If Compare op:Is Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, sess): _check_type(sess, (session.BaseSession, monitored_session.MonitoredSession)) self.session = sess", - "docstring": "Constructor. 
Args: sess: A tensorflow Session object.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:sess Assign" - }, - { - "library": "pytorch", - "name": "is_valid_experiment_name", - "source_code": "def is_valid_experiment_name(experiment_name: str) -> bool: valid_char_regex = '^[a-zA-Z0-9]([\\\\w-]*[a-zA-Z0-9])?$' valid = bool(re.match(valid_char_regex, experiment_name)) if valid: return True log.error(f\"Invalid experiment name: {experiment_name}. Experiment names should only contain alphanumeric characters, '_', and '-'. They cannot contain spaces, and the special characters '_' and '-' cannot be the first or last characters.\") return False", - "docstring": "Check if the experiment name is valid. A valid name: - Contains only alphanumeric characters and the special characters \"_\" & \"-\" - The special characters \"_\" & \"-\" shouldn't be the first or last characters - Cannot contain spaces", - "type": "function", - "file_path": "pytorch\\.github\\scripts\\runner_determinator.py", - "ast_data": "FunctionDef name:is_valid_experiment_name arguments arg:experiment_name type:str Assign Assign Call call:bool If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_recurrent_dropout_mask_for_cell", - "source_code": "def get_recurrent_dropout_mask_for_cell(self, inputs, training, count = 1): if self.recurrent_dropout = = 0: return None init_kwargs = dict(inputs = inputs, training = training, count = count) return self._recurrent_dropout_mask_cache.setdefault(kwargs = init_kwargs)", - "docstring": "Get the recurrent dropout mask for RNN cell. It will create mask based on context if there isn't any existing cached mask. If a new mask is generated, it will update the cache in the cell. Args: inputs: The input tensor whose shape will be used to generate dropout mask. training: Boolean tensor, whether its in training mode, dropout will be ignored in non-training mode. count: Int, how many dropout mask will be generated. It is useful for cell that has internal weights fused together. Returns: List of mask tensor, generated or cached mask based on context.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py", - "ast_data": "FunctionDef name:get_recurrent_dropout_mask_for_cell arguments arg:self arg:inputs arg:training arg:count If Compare op:Eq Return return:yes Assign Call call:dict Return return:yes" - }, - { - "library": "tensorflow", - "name": "PluginAsset", - "source_code": "class PluginAsset(metaclass = abc.ABCMeta): plugin_name = None @abc.abstractmethod def assets(self): raise NotImplementedError()", - "docstring": "This abstract base class allows TensorBoard to serialize assets to disk. Plugin authors are expected to extend the PluginAsset class, so that it: - has a unique plugin_name - provides an assets method that returns an {asset_name: asset_contents} dictionary. For now, asset_contents are strings, although we may add StringIO support later. LifeCycle of a PluginAsset instance: - It is constructed when get_plugin_asset is called on the class for the first time. 
- It is configured by code that follows the calls to get_plugin_asset - When the containing graph is serialized by the tf.compat.v1.summary.FileWriter, the writer calls assets and the PluginAsset instance provides its contents to be written to disk.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\summary\\plugin_asset.py", - "ast_data": "ClassDef name:PluginAsset Assign FunctionDef name:assets arguments arg:self Raise raises:NotImplementedError()" - }, - { - "library": "pandas", - "name": "__from_arrow__", - "source_code": "def __from_arrow__(self, array: pa.Array | pa.ChunkedArray) -> ArrowExtensionArray: array_class = self.construct_array_type() arr = array.cast(self.pyarrow_dtype, safe = True) return array_class(arr)", - "docstring": "Construct IntegerArray/FloatingArray from pyarrow Array/ChunkedArray.", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", - "ast_data": "FunctionDef name:__from_arrow__ arguments arg:self arg:array type:pa.Array | pa.ChunkedArray Assign Call call:construct_array_type Assign Call call:cast Return return:yes" - }, - { - "library": "scipy", - "name": "MatlabObject", - "source_code": "class MatlabObject(np.ndarray): def __new__(cls, input_array, classname = None): obj = np.asarray(input_array).view(cls) obj.classname = classname return obj def __array_finalize__(self, obj): self.classname = getattr(obj, 'classname', None)", - "docstring": "Subclass of ndarray to signal this is a matlab object. This is a simple subclass of :class: meant to be used by :func: and should not be instantiated directly.", - "type": "class", - "file_path": "scipy\\scipy\\io\\matlab\\_mio5_params.py", - "ast_data": "ClassDef name:MatlabObject FunctionDef name:__new__ arguments arg:cls arg:input_array arg:classname Assign Call call:view Assign Return return:yes FunctionDef name:__array_finalize__ arguments arg:self arg:obj Assign Call call:getattr" - }, - { - "library": "salmon", - "name": "writable", - "source_code": "def writable(self): return self.producer_fifo or not self.connected", - "docstring": "predicate for inclusion in the writable for select()", - "type": "method", - "file_path": "salmon\\salmon\\_vendor\\asynchat.py", - "ast_data": "FunctionDef name:writable arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "create_script_module", - "source_code": "def create_script_module(nn_module, stubs_fn, share_types = True, is_tracing = False): assert not isinstance(nn_module, torch.jit.RecursiveScriptModule) check_module_initialized(nn_module) concrete_type = get_module_concrete_type(nn_module, share_types) if not is_tracing: AttributeTypeIsSupportedChecker().check(nn_module) return create_script_module_impl(nn_module, concrete_type, stubs_fn)", - "docstring": "Create a new ScriptModule from an nn.Module. Args: nn_module: The original Python nn.Module that we are creating a ScriptModule for. stubs_fn: Lambda that takes an nn.Module and generates a list of ScriptMethodStubs to compile. share_types: Whether to share underlying JIT types between modules (if possible). NOTE: Only set to False this when we cannot guarantee type sharing will work correctly. This only happens today for traced modules, where the same module can produce different traced methods depending on the inputs. is_tracing: Whether this function is called during tracing or scripting. If tracing, we don't need to do AttributeTypeIsSupportedChecker because all the unsupported attributes will be baked as constant in the tracing graph. 
In addition, this check significantly slows down the traced modules when the module size is big.", - "type": "function", - "file_path": "pytorch\\torch\\jit\\_recursive.py", - "ast_data": "FunctionDef name:create_script_module arguments arg:nn_module arg:stubs_fn arg:share_types arg:is_tracing Assign Call call:get_module_concrete_type If Return return:yes" - }, - { - "library": "scikit-learn", - "name": "inverse_transform", - "source_code": "def inverse_transform(self, X): if issparse(X): X = X.tocsc() it = self.inverse_transform(np.diff(X.indptr).reshape(1, -1)) col_nonzeros = it.ravel() indptr = np.concatenate([[0], np.cumsum(col_nonzeros)]) Xt = csc_matrix((X.data, X.indices, indptr), shape = (X.shape[0], len(indptr) - 1), dtype = X.dtype) return Xt support = self.get_support() X = check_array(X, dtype = None) if support.sum() ! = X.shape[1]: raise ValueError('X has a different shape than during fitting.') if X.ndim = = 1: X = X[None, :] Xt = np.zeros((X.shape[0], support.size), dtype = X.dtype) Xt[:, support] = X return Xt", - "docstring": "Reverse the transformation operation. Parameters ---------- X : array of shape [n_samples, n_selected_features] The input samples. Returns ------- X_original : array of shape [n_samples, n_original_features] with columns of zeros inserted where features would have been removed by :meth:.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\feature_selection\\_base.py", - "ast_data": "FunctionDef name:inverse_transform arguments arg:self arg:X If Call call:issparse Assign Call call:tocsc Assign Call call:inverse_transform Assign Call call:ravel Assign Call call:concatenate Assign Call call:csc_matrix Return return:yes Assign Call call:get_support Assign Call call:check_array If Compare op:NotEq Raise raises:ValueError('X has a different shape than during fitting.') If Compare op:Eq Assign Assign Call call:zeros Assign Return return:yes" - }, - { - "library": "seaborn", - "name": "standard_scale", - "source_code": "@staticmethod def standard_scale(data2d, axis = 1): if axis = = 1: standardized = data2d else: standardized = data2d.T subtract = standardized.min() standardized = (standardized - subtract) / (standardized.max() - standardized.min()) if axis = = 1: return standardized else: return standardized.T", - "docstring": "Divide the data by the difference between the max and min Parameters ---------- data2d : pandas.DataFrame Data to normalize axis : int Which axis to normalize across. If 0, normalize across rows, if 1, normalize across columns. Returns ------- standardized : pandas.DataFrame Noramlized data with a mean of 0 and variance of 1 across the specified axis.", - "type": "method", - "file_path": "seaborn\\seaborn\\matrix.py", - "ast_data": "FunctionDef name:standard_scale arguments arg:data2d arg:axis If Compare op:Eq Assign Assign Assign Call call:min Assign If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "set_workers", - "source_code": "@contextlib.contextmanager def set_workers(workers): old_workers = get_workers() _config.default_workers = _workers(operator.index(workers)) try: yield finally: _config.default_workers = old_workers", - "docstring": "Context manager for the default number of workers used in Parameters ---------- workers : int The default number of workers to use Examples -------- >>> import numpy as np >>> from scipy import fft, signal >>> rng = np.random.default_rng() >>> x = rng.standard_normal((128, 64)) >>> with fft.set_workers(4): ... 
y = signal.fftconvolve(x, x)", - "type": "function", - "file_path": "scipy\\scipy\\fft\\_pocketfft\\helper.py", - "ast_data": "FunctionDef name:set_workers arguments arg:workers Assign Call call:get_workers Assign Call call:_workers Try Assign" - }, - { - "library": "authlib", - "name": "get_redirect_uri", - "source_code": "def get_redirect_uri(self): raise NotImplementedError()", - "docstring": "A method to get authorization code's ``:: def get_redirect_uri(self): return self.redirect_uri :return: A URL string", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\models.py", - "ast_data": "FunctionDef name:get_redirect_uri arguments arg:self Raise raises:NotImplementedError()" - }, - { - "library": "scrapy", - "name": "re_rsearch", - "source_code": "def re_rsearch(pattern: str | Pattern[str], text: str, chunk_size: int = 1024) -> tuple[int, int] | None: def _chunk_iter() -> Iterable[tuple[str, int]]: offset = len(text) while True: offset - = chunk_size * 1024 if offset < = 0: break yield (text[offset:], offset) yield (text, 0) if isinstance(pattern, str): pattern = re.compile(pattern) for chunk, offset in _chunk_iter(): matches = list(pattern.finditer(chunk)) if matches: start, end = matches[-1].span() return (offset + start, offset + end) return None", - "docstring": "This function does a reverse search in a text using a regular expression given in the attribute 'pattern'. Since the re module does not provide this functionality, we have to find for the expression into chunks of text extracted from the end (for the sake of efficiency). At first, a chunk of 'chunk_size' kilobytes is extracted from the end, and searched for the pattern. If the pattern is not found, another chunk is extracted, and another search is performed. This process continues until a match is found, or until the whole file is read. In case the pattern wasn't found, None is returned, otherwise it returns a tuple containing the start position of the match, and the ending (regarding the entire text).", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\python.py", - "ast_data": "FunctionDef name:re_rsearch arguments arg:pattern type:str | Pattern[str] arg:text type:str arg:chunk_size type:int FunctionDef name:_chunk_iter arguments Assign Call call:len While If Compare op:LtE If Call call:isinstance Assign Call call:compile For Call call:_chunk_iter Assign Call call:list If Assign Call call:span Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "IntegralImage", - "source_code": "class IntegralImage(Module): def forward(self, input: Tensor) -> Tensor: return integral_image(input)", - "docstring": "Calculates integral of the input image tensor. This particular version sums over the last two dimensions. Args: image: the input image tensor with shape :math:. Returns: Integral tensor for the input image tensor with shape :math:. 
Shape: - Input: :math: - Output: :math: Examples: >>> input = torch.ones(1, 5, 5) >>> output = IntegralImage()(input) >>> output tensor([[[ 1., 2., 3., 4., 5.], [ 2., 4., 6., 8., 10.], [ 3., 6., 9., 12., 15.], [ 4., 8., 12., 16., 20.], [ 5., 10., 15., 20., 25.]]])", - "type": "class", - "file_path": "kornia\\kornia\\enhance\\integral.py", - "ast_data": "ClassDef name:IntegralImage FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "pandas", - "name": "walk", - "source_code": "def walk(self, where: str = '/') -> Iterator[tuple[str, list[str], list[str]]]: _tables() self._check_if_open() assert self._handle is not None assert _table_mod is not None for g in self._handle.walk_groups(where): if getattr(g._v_attrs, 'pandas_type', None) is not None: continue groups = [] leaves = [] for child in g._v_children.values(): pandas_type = getattr(child._v_attrs, 'pandas_type', None) if pandas_type is None: if isinstance(child, _table_mod.group.Group): groups.append(child._v_name) else: leaves.append(child._v_name) yield (g._v_pathname.rstrip('/'), groups, leaves)", - "docstring": "Walk the pytables group hierarchy for pandas objects. This generator will yield the group path, subgroups and pandas object names for each group. Any non-pandas PyTables objects that are not a group will be ignored. The group itself is listed first (preorder), then each of its child groups (following an alphanumerical order) is also traversed, following the same procedure. Parameters ---------- where : str, default \"/\" Group where to start walking. Yields ------ path : str Full path to a group (without trailing '/'). groups : list Names (strings) of the groups contained in . leaves : list Names (strings) of the pandas objects contained in . See Also -------- HDFStore.info : Prints detailed information on the store. Examples -------- >>> df1 = pd.DataFrame([[1, 2], [3, 4]], columns=[\"A\", \"B\"]) >>> store = pd.HDFStore(\"store.h5\", \"w\") # doctest: +SKIP >>> store.put(\"data\", df1, format=\"table\") # doctest: +SKIP >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=[\"A\", \"B\"]) >>> store.append(\"data\", df2) # doctest: +SKIP >>> store.close() # doctest: +SKIP >>> for group in store.walk(): # doctest: +SKIP ... 
print(group) # doctest: +SKIP >>> store.close() # doctest: +SKIP", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:walk arguments arg:self arg:where type:str For Call call:walk_groups If Compare op:IsNot Assign Assign For Call call:values Assign Call call:getattr If Compare op:Is If Call call:isinstance" - }, - { - "library": "tensorflow", - "name": "update_mask", - "source_code": "def update_mask(self, padding_mask, dataset_batch): original_batch_size = self.get_real_batch_size(dataset_batch) missing_count = self.padded_batch_size - original_batch_size mask = backend.concatenate([array_ops.ones(original_batch_size), array_ops.zeros(missing_count)], axis = 0) return backend.concatenate([padding_mask, mask], axis = 0)", - "docstring": "Calculate and cache the amount of padding required for a batch.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py", - "ast_data": "FunctionDef name:update_mask arguments arg:self arg:padding_mask arg:dataset_batch Assign Call call:get_real_batch_size Assign Assign Call call:concatenate Return return:yes" - }, - { - "library": "pytorch", - "name": "determine_observer_insert_points", - "source_code": "def determine_observer_insert_points(self, prepared_fx_model: GraphModule) -> dict[str, dict[str, Any]]: obs_ctr = ModelReportObserver obs_fqn_to_info: dict[str, dict[str, Any]] = {} for fqn, module in prepared_fx_model.named_modules(): if self._is_supported(module, insert = True): targeted_node = self._get_targeting_node(prepared_fx_model, fqn) pre_obs_fqn = fqn + '.' + self.DEFAULT_PRE_OBSERVER_NAME obs_fqn_to_info[pre_obs_fqn] = {DETECTOR_TARGET_NODE_KEY: targeted_node, DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(), DETECTOR_IS_POST_OBS_KEY: False, DETECTOR_OBS_ARGS_KEY: targeted_node.args} post_obs_fqn = fqn + '.' + self.DEFAULT_POST_OBSERVER_NAME obs_fqn_to_info[post_obs_fqn] = {DETECTOR_TARGET_NODE_KEY: targeted_node, DETECTOR_OBS_TO_INSERT_KEY: obs_ctr(), DETECTOR_IS_POST_OBS_KEY: True, DETECTOR_OBS_ARGS_KEY: (targeted_node,)} return obs_fqn_to_info", - "docstring": "Determines where observers need to be inserted for the Dynamic vs Static detector. For this detector, we want to place observers on either side of linear layers in the model. 
Currently inserts observers for: linear layers Args: prepared_fx_model (GraphModule): The prepared Fx GraphModule Returns a Dict mapping from unique observer fqns (where we want to insert them) to a Dict with: key \"target_node\" -> the node we are trying to observe with this observer (torch.fx.node.Node) key \"observer_to_insert\" -> the observer we wish to insert (ObserverBase) key \"is_post_observer\" -> True if this is meant to be a post-observer for target_node, False if pre-observer key \"observer_args\" -> The arguments that are meant to be passed into the observer", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\detector.py", - "ast_data": "FunctionDef name:determine_observer_insert_points arguments arg:self arg:prepared_fx_model type:GraphModule Assign For Call call:named_modules If Call call:_is_supported Assign Call call:_get_targeting_node Assign Assign Assign Assign Return return:yes" - }, - { - "library": "virtualenv", - "name": "reset", - "source_code": "def reset(self): pass", - "docstring": "This is a temporary folder, is already empty to start with.", - "type": "method", - "file_path": "virtualenv\\src\\virtualenv\\app_data\\via_tempdir.py", - "ast_data": "FunctionDef name:reset arguments arg:self" - }, - { - "library": "coconut", - "name": "reconstitute_paramdef", - "source_code": "def reconstitute_paramdef(pos_only_args, req_args, default_args, star_arg, kwd_only_args, dubstar_arg): args_list = [] if pos_only_args: args_list += pos_only_args args_list.append('/') args_list += req_args for name, default in default_args: args_list.append(name + ' = ' + default) if star_arg is not None: args_list.append('*' + star_arg) elif kwd_only_args: args_list.append('*') for name, default in kwd_only_args: if default is None: args_list.append(name) else: args_list.append(name + ' = ' + default) if dubstar_arg is not None: args_list.append('**' + dubstar_arg) return ', '.join(args_list)", - "docstring": "Convert the results of split_args_list back into a parameter defintion string.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\compiler.py", - "ast_data": "FunctionDef name:reconstitute_paramdef arguments arg:pos_only_args arg:req_args arg:default_args arg:star_arg arg:kwd_only_args arg:dubstar_arg Assign If For If Compare op:IsNot If For If Compare op:Is If Compare op:IsNot Return return:yes" - }, - { - "library": "mongo", - "name": "gen", - "source_code": "def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: for document in documents: common.validate_is_document_type('document', document) if not isinstance(document, RawBSONDocument): if '_id' not in document: document['_id'] = ObjectId() inserted_ids.append(document['_id']) yield (message._INSERT, document)", - "docstring": "A generator that validates documents and handles _ids.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\collection.py", - "ast_data": "FunctionDef name:gen arguments For If If Compare op:NotIn Assign Call call:ObjectId" - }, - { - "library": "pytorch", - "name": "convert", - "source_code": "def convert(module, mapping = None, inplace = False, remove_qconfig = True, is_reference = False, convert_custom_config_dict = None, use_precomputed_fake_quant = False): torch._C._log_api_usage_once('quantization_api.quantize.convert') if not inplace: module = copy.deepcopy(module) _convert(module, mapping, inplace = True, is_reference = is_reference, convert_custom_config_dict = convert_custom_config_dict, use_precomputed_fake_quant = 
use_precomputed_fake_quant) if remove_qconfig: _remove_qconfig(module) return module", - "docstring": "Converts submodules in input module to a different module according to by calling method on the target module class. And remove qconfig at the end if remove_qconfig is set to True. Args: : prepared and calibrated module : a dictionary that maps from source module type to target module type, can be overwritten to allow swapping user defined Modules : carry out model transformations in-place, the original module is mutated : custom configuration dictionary for convert function : a flag to enable use of precomputed fake quant .. code-block:: python # Example of convert_custom_config_dict: convert_custom_config_dict = { # user will manually define the corresponding quantized # module class which has a from_observed class method that converts # observed custom module to quantized custom module \"observed_to_quantized_custom_module_class\": { ObservedCustomModule: QuantizedCustomModule } }", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py", - "ast_data": "FunctionDef name:convert arguments arg:module arg:mapping arg:inplace arg:remove_qconfig arg:is_reference arg:convert_custom_config_dict arg:use_precomputed_fake_quant If Assign Call call:deepcopy If Return return:yes" - }, - { - "library": "pandas", - "name": "IsInForObjects", - "source_code": "class IsInForObjects: variants = ['nans', 'short', 'long', 'long_floats'] params = [variants, variants] param_names = ['series_type', 'vals_type'] def setup(self, series_type, vals_type): N_many = 10 ** 5 if series_type == 'nans': ser_vals = np.full(10 ** 4, np.nan) elif series_type == 'short': ser_vals = np.arange(2) elif series_type == 'long': ser_vals = np.arange(N_many) elif series_type == 'long_floats': ser_vals = np.arange(N_many, dtype = np.float64) self.series = Series(ser_vals).astype(object) if vals_type == 'nans': values = np.full(10 ** 4, np.nan) elif vals_type == 'short': values = np.arange(2) elif vals_type == 'long': values = np.arange(N_many) elif vals_type == 'long_floats': values = np.arange(N_many, dtype = np.float64) self.values = values.astype(object) def time_isin(self, series_type, vals_type): self.series.isin(self.values)", - "docstring": "A subset of the cartesian product of cases have special motivations: \"nans\" x \"nans\" if nan-objects are different objects, this has the potential to trigger O(n^2) running time \"short\" x \"long\" running time dominated by the preprocessing \"long\" x \"short\" running time dominated by look-up \"long\" x \"long\" no dominating part \"long_floats\" x \"long_floats\" because of nans floats are special no dominating part", - "type": "class", - "file_path": "pandas\\asv_bench\\benchmarks\\algos\\isin.py", - "ast_data": "ClassDef name:IsInForObjects Assign Assign Assign FunctionDef name:setup arguments arg:self arg:series_type arg:vals_type Assign If Compare op:Eq Assign Call call:full If Compare op:Eq Assign Call call:arange If Compare op:Eq Assign Call call:arange If Compare op:Eq Assign Call call:arange Assign Call call:astype If Compare op:Eq Assign Call call:full If Compare op:Eq Assign Call call:arange If Compare op:Eq Assign Call call:arange If Compare op:Eq Assign Call call:arange Assign Call call:astype FunctionDef name:time_isin arguments arg:self arg:series_type arg:vals_type" - }, - { - "library": "pytorch", - "name": "set_unbacked_var_to_val", - "source_code": "@record_shapeenv_event() def set_unbacked_var_to_val(self, k: sympy.Symbol, 
v: int) -> None: log.info('set_unbacked_var_to_val %s = %s', k, v) self.unbacked_var_to_val[k] = sympy.sympify(v)", - "docstring": "Used only when propagate_real_tensors; registers a value for an unbacked symbol, which can be used last resort to resolve hints.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", - "ast_data": "FunctionDef name:set_unbacked_var_to_val arguments arg:self arg:k type:sympy.Symbol arg:v type:int Call call:record_shapeenv_event Assign Call call:sympify" - }, - { - "library": "matplotlib", - "name": "interactive", - "source_code": "def interactive(b): rcParams['interactive'] = b", - "docstring": "Set whether to redraw after every plotting command (e.g. ).", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", - "ast_data": "FunctionDef name:interactive arguments arg:b Assign" - }, - { - "library": "scipy", - "name": "Schaffer04", - "source_code": "class Schaffer04(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.custom_bounds = [(-10, 10), (-10, 10)] self.global_optimum = [[0.0, 1.253115]] self.fglob = 0.292579 def fun(self, x, *args): self.nfev + = 1 num = cos(sin(abs(x[0] ** 2 - x[1] ** 2))) ** 2 - 0.5 den = (1 + 0.001 * (x[0] ** 2 + x[1] ** 2)) ** 2 return 0.5 + num / den", - "docstring": "Schaffer 4 objective function. This class defines the Schaffer 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schaffer04}}(x) = 0.5 + \\frac{\\cos^2 \\left( \\sin(x_1^2 - x_2^2) \\right ) - 0.5}{1 + 0.001(x_1^2 + x_2^2)^2}^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", - "ast_data": "ClassDef name:Schaffer04 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Assign Return return:yes" - }, - { - "library": "numpy", - "name": "default_device", - "source_code": "def default_device(self): return 'cpu'", - "docstring": "The default device used for new NumPy arrays. For NumPy, this always returns ``. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : str The default device used for new NumPy arrays. Examples -------- >>> info = np.__array_namespace_info__() >>> info.default_device() 'cpu'", - "type": "method", - "file_path": "numpy\\numpy\\_array_api_info.py", - "ast_data": "FunctionDef name:default_device arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "update", - "source_code": "def update(self, other: Series | Sequence | Mapping) -> None: if not PYPY: if sys.getrefcount(self) < = REF_COUNT: warnings.warn(_chained_assignment_method_msg, ChainedAssignmentError, stacklevel = 2) if not isinstance(other, Series): other = Series(other) other = other.reindex_like(self) mask = notna(other) self._mgr = self._mgr.putmask(mask = mask, new = other)", - "docstring": "Modify Series in place using values from passed Series. Uses non-NA values from passed Series to make updates. Aligns on index. 
Parameters ---------- other : Series, or object coercible into Series Other Series that provides values to update the current Series. See Also -------- Series.combine : Perform element-wise operation on two Series using a given function. Series.transform: Modify a Series using a function. Examples -------- >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6])) >>> s 0 4 1 5 2 6 dtype: int64 >>> s = pd.Series([\"a\", \"b\", \"c\"]) >>> s.update(pd.Series([\"d\", \"e\"], index=[0, 2])) >>> s 0 d 1 b 2 e dtype: object >>> s = pd.Series([1, 2, 3]) >>> s.update(pd.Series([4, 5, 6, 7, 8])) >>> s 0 4 1 5 2 6 dtype: int64 If `` can also be a non-Series object type that is coercible into a Series >>> s = pd.Series([1, 2, 3]) >>> s.update([4, np.nan, 6]) >>> s 0 4 1 2 2 6 dtype: int64 >>> s = pd.Series([1, 2, 3]) >>> s.update({1: 9}) >>> s 0 1 1 9 2 3 dtype: int64", - "type": "method", - "file_path": "pandas\\pandas\\core\\series.py", - "ast_data": "FunctionDef name:update arguments arg:self arg:other type:Series | Sequence | Mapping If If Compare op:LtE If Assign Call call:Series Assign Call call:reindex_like Assign Call call:notna Assign Call call:putmask" - }, - { - "library": "tensorflow", - "name": "__add__", - "source_code": "def __add__(self, other): ret = RichLine() if isinstance(other, str): ret.text = self.text + other ret.font_attr_segs = self.font_attr_segs[:] return ret elif isinstance(other, RichLine): ret.text = self.text + other.text ret.font_attr_segs = self.font_attr_segs[:] old_len = len(self.text) for start, end, font_attr in other.font_attr_segs: ret.font_attr_segs.append((old_len + start, old_len + end, font_attr)) return ret else: raise TypeError('%r cannot be concatenated with a RichLine' % other)", - "docstring": "Concatenate two chunks of maybe rich text to make a longer rich line. Does not modify self. Args: other: Another piece of text to concatenate with this one. If it is a plain str, it will be appended to this string with no attributes. If it is a RichLine, it will be appended to this string with its attributes preserved. Returns: A new RichLine comprising both chunks of text, with appropriate attributes applied to the corresponding substrings.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:__add__ arguments arg:self arg:other Assign Call call:RichLine If Call call:isinstance Assign Assign Return return:yes If Call call:isinstance Assign Assign Assign Call call:len For Return return:yes Raise raises:TypeError('%r cannot be concatenated with a RichLine' % other)" - }, - { - "library": "tensorflow", - "name": "all_gather_v2", - "source_code": "def all_gather_v2(t, group_size, group_key, instance_key, communication_hint = 'auto', timeout = 0, ordering_token = None, name = None): if ordering_token is not None: ordering_token = [ordering_token] else: ordering_token = [] return gen_collective_ops.collective_gather_v2(t, group_size = group_size, group_key = group_key, instance_key = instance_key, communication_hint = communication_hint.lower(), timeout_seconds = timeout, is_stateless = False, ordering_token = ordering_token, name = name)", - "docstring": "Accumulates tensors collectively, across devices, along first dimension. Args: t: the tensor to participate in the accumulation. group_size: an int32 tensor, the total number of tensors to be collectively accumulated. Each must reside on a different device. Should be a positive integer. 
group_key: an int32 tensor identifying the group of devices. instance_key: an int32 tensor identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: a float. If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. ordering_token: a resource tensor on the same device as the op to order the collectives in a per-device manner by auto control dependency. This argument can be omited when there is one collective Op per , or when explicit control dependency is used instead of auto control dependency. name: name of the Op. Returns: An Op implementing the distributed operation.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py", - "ast_data": "FunctionDef name:all_gather_v2 arguments arg:t arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout arg:ordering_token arg:name If Compare op:IsNot Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "codegen_static_numels", - "source_code": "def codegen_static_numels(self, code): def is_static_integer(expr: sympy.Expr) -> bool: return isinstance(expr, (sympy.Integer, int)) for tree in self.range_trees: if not tree.is_reduction or self.inside_reduction: simplified_tree_numel = V.graph.sizevars.simplify(tree.numel) if is_static_integer(simplified_tree_numel): code.writeline(f'{tree.prefix}numel = {int(simplified_tree_numel)}') if tree.is_reduction and self.persistent_reduction: if self.cooperative_reduction: numel = self.kexpr(self.rename_indexing(tree.numel)) val = f'triton_helpers.constexpr_next_power_of_2(({numel} + RSPLIT - 1) // RSPLIT)' else: val = self._get_persistent_RBLOCK(tree.numel) code.writeline(f'{tree.prefix.upper()}BLOCK: tl.constexpr = {val}') if tree.prefix = = 'x' and self.no_x_dim: code.writeline('XBLOCK: tl.constexpr = 1')", - "docstring": "We get a small speedup from hard coding numels if they are static. This code stomps on the passed-in values by writing an constant to the top of the kernel. In a kernel like: def KERNEL_NAME(in_ptr0, in_ptr1, out_ptr2, xnumel, r0_numel, XBLOCK : tl.constexpr, R0_BLOCK : tl.constexpr): We would add xnumel = 4096 r0_numel = 768 After the signature, before the kernel code, if we decided to make these static. As its hardcoded, it becomes a better signal to triton on how to unroll and do some static indexing. So, it's not so much that downstream knows that its a static numel, as that you just plop a constant into the kernel.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", - "ast_data": "FunctionDef name:codegen_static_numels arguments arg:self arg:code FunctionDef name:is_static_integer arguments arg:expr type:sympy.Expr Return return:yes For If BoolOp Assign Call call:simplify If Call call:is_static_integer If BoolOp If Assign Call call:kexpr Assign Assign Call call:_get_persistent_RBLOCK If BoolOp Compare op:Eq" - }, - { - "library": "pytorch", - "name": "CircularPad2d", - "source_code": "class CircularPad2d(_CircularPadNd): padding: tuple[int, int, int, int] def __init__(self, padding: _size_4_t) -> None: super().__init__() self.padding = _quadruple(padding) def _check_input_dim(self, input): if input.dim() ! = 3 and input.dim() ! 
= 4: raise ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')", - "docstring": "Pads the input tensor using circular padding of the input boundary. Tensor values at the beginning of the dimension are used to pad the end, and values at the end are used to pad the beginning. If negative padding is applied then the ends of the tensor get removed. For -dimensional padding, use :func:. Args: padding (int, tuple): the size of the padding. If is , uses the same padding in all boundaries. If a 4-, uses (:math:, :math:, :math:, :math:) Shape: - Input: :math: or :math:. - Output: :math: or :math:, where :math: :math: Examples:: >>> m = nn.CircularPad2d(2) >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3) >>> input tensor([[[[0., 1., 2.], [3., 4., 5.], [6., 7., 8.]]]]) >>> m(input) tensor([[[[4., 5., 3., 4., 5., 3., 4.], [7., 8., 6., 7., 8., 6., 7.], [1., 2., 0., 1., 2., 0., 1.], [4., 5., 3., 4., 5., 3., 4.], [7., 8., 6., 7., 8., 6., 7.], [1., 2., 0., 1., 2., 0., 1.], [4., 5., 3., 4., 5., 3., 4.]]]]) >>> # using different paddings for different sides >>> m = nn.CircularPad2d((1, 1, 2, 0)) >>> m(input) tensor([[[[5., 3., 4., 5., 3.], [8., 6., 7., 8., 6.], [2., 0., 1., 2., 0.], [5., 3., 4., 5., 3.], [8., 6., 7., 8., 6.]]]])", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\padding.py", - "ast_data": "ClassDef name:CircularPad2d FunctionDef name:__init__ arguments arg:self arg:padding type:_size_4_t Assign Call call:_quadruple FunctionDef name:_check_input_dim arguments arg:self arg:input If BoolOp Compare op:NotEq Compare op:NotEq Raise raises:ValueError(f'expected 3D or 4D input (got {input.dim()}D input)')" - }, - { - "library": "matplotlib", - "name": "set_bounds", - "source_code": "def set_bounds(self, *args): if len(args) = = 1: l, b, w, h = args[0] else: l, b, w, h = args self._x = l self._y = b self._width = w self._height = h self.stale = True", - "docstring": "Set the bounds of the rectangle. Call signatures:: set_bounds(left, bottom, width, height) set_bounds((left, bottom, width, height)) Parameters ---------- left, bottom : float The coordinates of the bottom left corner of the rectangle. width, height : float The width/height of the rectangle.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_bounds arguments arg:self vararg:args If Compare op:Eq Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "cherrypy", - "name": "load", - "source_code": "@classmethod def load(self, input): is_file = isinstance(input, text_or_bytes) or hasattr(input, 'read') return Parser().dict_from_file(input) if is_file else input.copy()", - "docstring": "Resolve 'input' to dict from a dict, file, or filename.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py", - "ast_data": "FunctionDef name:load arguments arg:self arg:input Assign BoolOp Call call:isinstance Call call:hasattr Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, ax, loc = None, bbox = None, **kwargs): super().__init__() if isinstance(loc, str): if loc not in self.codes: raise ValueError('Unrecognized location {!r}. 
Valid locations are\\n\\t{}'.format(loc, '\\n\\t'.join(self.codes))) loc = self.codes[loc] self.set_figure(ax.get_figure(root = False)) self._axes = ax self._loc = loc self._bbox = bbox ax._unstale_viewLim() self.set_transform(ax.transAxes) self._cells = {} self._edges = None self._autoColumns = [] self._autoFontsize = True self._internal_update(kwargs) self.set_clip_on(False)", - "docstring": "Parameters ---------- ax : The to plot the table into. loc : str, optional The position of the cell with respect to *ax*. This must be one of the . bbox : or [xmin, ymin, width, height], optional A bounding box to draw the table into. If this is not *None*, this overrides *loc*. Other Parameters ---------------- **kwargs properties.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\table.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:ax arg:loc arg:bbox kwarg:kwargs If Call call:isinstance If Compare op:NotIn Raise raises:ValueError('Unrecognized location {!r}. Valid locations are\\n\\t{}'.format(loc, '\\n\\t'.join(self.codes))) Assign Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "matplotlib", - "name": "register_scale", - "source_code": "def register_scale(scale_class): _scale_mapping[scale_class.name] = scale_class", - "docstring": "Register a new kind of scale. Parameters ---------- scale_class : subclass of The scale to register.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\scale.py", - "ast_data": "FunctionDef name:register_scale arguments arg:scale_class Assign" - }, - { - "library": "numpy", - "name": "truncate", - "source_code": "def truncate(self, size): isize = int(size) if isize ! = size or isize < 1: raise ValueError('size must be a positive integer') if isize > = len(self.coef): coef = self.coef else: coef = self.coef[: isize] return self.__class__(coef, self.domain, self.window, self.symbol)", - "docstring": "Truncate series to length . Reduce the series to length by discarding the high degree terms. The value of must be a positive integer. This can be useful in least squares where the coefficients of the high degree terms may be very small. Parameters ---------- size : positive int The series is reduced to length by discarding the high degree terms. The value of must be a positive integer. Returns ------- new_series : series New instance of series with truncated coefficients.", - "type": "method", - "file_path": "numpy\\numpy\\polynomial\\_polybase.py", - "ast_data": "FunctionDef name:truncate arguments arg:self arg:size Assign Call call:int If BoolOp Compare op:NotEq Compare op:Lt Raise raises:ValueError('size must be a positive integer') If Compare op:GtE Assign Assign Return return:yes" - }, - { - "library": "pygame", - "name": "blit_array", - "source_code": "def blit_array(surface, array): if isinstance(array, numpy_ndarray) and array.dtype in numpy_floats: array = array.round(0).astype(numpy_uint32) return array_to_surface(surface, array)", - "docstring": "pygame.surfarray.blit_array(Surface, array): return None Blit directly from a array values. Directly copy values from an array into a Surface. This is faster than converting the array into a Surface and blitting. The array must be the same dimensions as the Surface and will completely replace all pixel values. Only integer, ascii character and record arrays are accepted. 
This function will temporarily lock the Surface as the new values are copied.", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:blit_array arguments arg:surface arg:array If BoolOp Call call:isinstance Compare op:In Assign Call call:astype Return return:yes" - }, - { - "library": "pytorch", - "name": "StorageReader", - "source_code": "class StorageReader(abc.ABC): @abc.abstractmethod def reset(self, checkpoint_id: Union[str, os.PathLike, None] = None) -> None: ... @abc.abstractmethod def read_metadata(self) -> Metadata: pass @abc.abstractmethod def set_up_storage_reader(self, metadata: Metadata, is_coordinator: bool) -> None: pass @abc.abstractmethod def prepare_local_plan(self, plan: LoadPlan) -> LoadPlan: pass @abc.abstractmethod def prepare_global_plan(self, plans: list[LoadPlan]) -> list[LoadPlan]: pass @abc.abstractmethod def read_data(self, plan: LoadPlan, planner: LoadPlanner) -> Future[None]: pass @classmethod @abc.abstractmethod def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool: ...", - "docstring": "Interface used by ``: 0) (all ranks) set checkpoint_id if users pass a valid checkpoint_id. 1) (all ranks) read_metadata() 2) (all ranks) set_up_storage_reader() 3) (all ranks) prepare_local_plan() 4) (coordinator) prepare_global_plan() 5) (all ranks) read_data()", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\storage.py", - "ast_data": "ClassDef name:StorageReader FunctionDef name:reset arguments arg:self arg:checkpoint_id type:Union[str, os.PathLike, None] FunctionDef name:read_metadata arguments arg:self FunctionDef name:set_up_storage_reader arguments arg:self arg:metadata type:Metadata arg:is_coordinator type:bool FunctionDef name:prepare_local_plan arguments arg:self arg:plan type:LoadPlan FunctionDef name:prepare_global_plan arguments arg:self arg:plans type:list[LoadPlan] FunctionDef name:read_data arguments arg:self arg:plan type:LoadPlan arg:planner type:LoadPlanner FunctionDef name:validate_checkpoint_id arguments arg:cls arg:checkpoint_id type:Union[str, os.PathLike]" - }, - { - "library": "cherrypy", - "name": "delete", - "source_code": "def delete(self): raise NotImplementedError", - "docstring": "Remove ALL cached variants of the current resource.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\caching.py", - "ast_data": "FunctionDef name:delete arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "pytorch", - "name": "stride_ordered", - "source_code": "@staticmethod def stride_ordered(sizes, order): assert OrderedSet(range(len(sizes))) = = OrderedSet(order) fill_order = stride_order2fill_order(order) return FlexibleLayout.fill_ordered(sizes, fill_order)", - "docstring": "Create a stride based on the sorted order of a permuted range. In this format, channels last would be: [3, 0, 2, 1]", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "FunctionDef name:stride_ordered arguments arg:sizes arg:order Assign Call call:stride_order2fill_order Return return:yes" - }, - { - "library": "mongo", - "name": "pool_closed", - "source_code": "def pool_closed(self, event: PoolClosedEvent) -> None: raise NotImplementedError", - "docstring": "Abstract method to handle a . Emitted when a connection Pool is closed. 
:param event: An instance of :class:.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:pool_closed arguments arg:self arg:event type:PoolClosedEvent Raise raises:NotImplementedError" - }, - { - "library": "kornia", - "name": "from_matrix", - "source_code": "@classmethod def from_matrix(cls, matrix: Tensor) -> Se2: r = So2.from_matrix(matrix[..., : 2, : 2]) t = matrix[..., : 2, -1] return cls(r, t)", - "docstring": "Create an Se2 group from a matrix. Args: matrix: tensor of shape :math:. Example: >>> s = Se2.from_matrix(torch.eye(3).repeat(2, 1, 1)) >>> s.r Parameter containing: tensor([1.+0.j, 1.+0.j], requires_grad=True) >>> s.t Parameter containing: tensor([[0., 0.], [0., 0.]], requires_grad=True)", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", - "ast_data": "FunctionDef name:from_matrix arguments arg:cls arg:matrix type:Tensor Assign Call call:from_matrix Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "is_in_onnx_export", - "source_code": "def is_in_onnx_export() -> bool: from torch.onnx._globals import GLOBALS from torch.onnx._internal.exporter import _flags return GLOBALS.in_onnx_export or _flags._is_onnx_exporting", - "docstring": "Returns whether it is in the middle of ONNX export.", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\__init__.py", - "ast_data": "FunctionDef name:is_in_onnx_export arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "zip_folder", - "source_code": "def zip_folder(folder_to_zip: Path, dest_file_base_name: Path) -> Path: if dest_file_base_name.suffix = = '.zip': dest_file_base_name = dest_file_base_name.with_suffix('') ensure_dir_exists(dest_file_base_name.parent) print(f'Zipping {folder_to_zip}\\n to {dest_file_base_name}') return Path(shutil.make_archive(str(dest_file_base_name), 'zip', folder_to_zip))", - "docstring": "Returns the path to the resulting zip file, with the appropriate extension added if needed", - "type": "function", - "file_path": "pytorch\\.github\\scripts\\file_io_utils.py", - "ast_data": "FunctionDef name:zip_folder arguments arg:folder_to_zip type:Path arg:dest_file_base_name type:Path If Compare op:Eq Assign Call call:with_suffix Return return:yes" - }, - { - "library": "numpy", - "name": "as_apply", - "source_code": "def as_apply(func, *args, **kwargs): return Expr(Op.APPLY, (func, tuple(map(as_expr, args)), {k: as_expr(v) for k, v in kwargs.items()}))", - "docstring": "Return object as APPLY expression (function call, constructor, etc.)", - "type": "function", - "file_path": "numpy\\numpy\\f2py\\symbolic.py", - "ast_data": "FunctionDef name:as_apply arguments arg:func vararg:args kwarg:kwargs Return return:yes" - }, - { - "library": "coconut", - "name": "unlink", - "source_code": "def unlink(link_path): if os.path.islink(link_path): os.unlink(link_path) return True return False", - "docstring": "Remove a symbolic link if one exists. 
Return whether anything was done.", - "type": "function", - "file_path": "coconut\\coconut\\command\\util.py", - "ast_data": "FunctionDef name:unlink arguments arg:link_path If Call call:islink Return return:yes Return return:yes" - }, - { - "library": "coconut", - "name": "log_func", - "source_code": "def log_func(self, func): if self.verbose: to_log = func() if not isinstance(to_log, tuple): to_log = (to_log,) self.printlog(*to_log)", - "docstring": "Calls a function and logs the results if --verbose.", - "type": "method", - "file_path": "coconut\\coconut\\terminal.py", - "ast_data": "FunctionDef name:log_func arguments arg:self arg:func If Assign Call call:func If Assign" - }, - { - "library": "pandas", - "name": "factorize", - "source_code": "def factorize(self, use_na_sentinel: bool = True) -> tuple[np.ndarray, ExtensionArray]: arr, na_value = self._values_for_factorize() codes, uniques = factorize_array(arr, use_na_sentinel = use_na_sentinel, na_value = na_value) uniques_ea = self._from_factorized(uniques, self) return (codes, uniques_ea)", - "docstring": "Encode the extension array as an enumerated type. Parameters ---------- use_na_sentinel : bool, default True If True, the sentinel -1 will be used for NaN values. If False, NaN values will be encoded as non-negative integers and will not drop the NaN from the uniques of the values. .. versionadded:: 1.5.0 Returns ------- codes : ndarray An integer NumPy array that's an indexer into the original ExtensionArray. uniques : ExtensionArray An ExtensionArray containing the unique values of . .. note:: uniques will *not* contain an entry for the NA value of the ExtensionArray if there are any missing values present in . See Also -------- factorize : Top-level factorize method that dispatches here. Notes ----- :meth: offers a keyword as well. Examples -------- >>> idx1 = pd.PeriodIndex( ... [\"2014-01\", \"2014-01\", \"2014-02\", \"2014-02\", \"2014-03\", \"2014-03\"], ... freq=\"M\", ... ) >>> arr, idx = idx1.factorize() >>> arr array([0, 0, 1, 1, 2, 2]) >>> idx PeriodIndex(['2014-01', '2014-02', '2014-03'], dtype='period[M]')", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\base.py", - "ast_data": "FunctionDef name:factorize arguments arg:self arg:use_na_sentinel type:bool Assign Call call:_values_for_factorize Assign Call call:factorize_array Assign Call call:_from_factorized Return return:yes" - }, - { - "library": "scipy", - "name": "apply_filter", - "source_code": "def apply_filter(self, x, axis = -1, mode = 'constant', cval = 0): output_len = _output_len(self._h_len_orig, x.shape[axis], self._up, self._down) output_shape = np.asarray(x.shape, dtype = np.int64) output_shape[axis] = output_len out = np.zeros(output_shape, dtype = self._output_type, order = 'C') axis = axis % x.ndim mode = _check_mode(mode) _apply(np.asarray(x, self._output_type), self._h_trans_flip, out, self._up, self._down, axis, mode, cval) return out", - "docstring": "Apply the prepared filter to the specified axis of N-D signal x.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_upfirdn.py", - "ast_data": "FunctionDef name:apply_filter arguments arg:self arg:x arg:axis arg:mode arg:cval Assign Call call:_output_len Assign Call call:asarray Assign Assign Call call:zeros Assign Assign Call call:_check_mode Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_useLocale", - "source_code": "def get_useLocale(self): return self._useLocale", - "docstring": "Return whether locale settings are used for formatting. 
See Also -------- ScalarFormatter.set_useLocale", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:get_useLocale arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "rename", - "source_code": "@_csot.apply def rename(self, new_name: str, session: Optional[ClientSession] = None, comment: Optional[Any] = None, **kwargs: Any) -> MutableMapping[str, Any]: if not isinstance(new_name, str): raise TypeError(f'new_name must be an instance of str, not {type(new_name)}') if not new_name or '..' in new_name: raise InvalidName('collection names cannot be empty') if new_name[0] = = '.' or new_name[-1] = = '.': raise InvalidName(\"collection names must not start or end with '.'\") if '$' in new_name and (not new_name.startswith('oplog.$main')): raise InvalidName(\"collection names must not contain '$'\") new_name = f'{self._database.name}.{new_name}' cmd = {'renameCollection': self._full_name, 'to': new_name} cmd.update(kwargs) if comment is not None: cmd['comment'] = comment write_concern = self._write_concern_for_cmd(cmd, session) with self._conn_for_writes(session, operation = _Op.RENAME) as conn: with self._database.client._tmp_session(session) as s: return conn.command('admin', cmd, write_concern = write_concern, parse_write_concern_error = True, session = s, client = self._database.client)", - "docstring": "Rename this collection. If operating in auth mode, client must be authorized as an admin to perform this operation. Raises :class: if is not an instance of :class:. Raises :class: if is not a valid collection name. :param new_name: new name for this collection :param session: a :class:. :param comment: A user-provided comment to attach to this command. :param kwargs: additional arguments to the rename command may be passed as keyword arguments to this helper method (i.e. `~pymongo.collection.Collection.write_concern` parameter. .. versionchanged:: 3.4 Apply this collection's write concern automatically to this operation when connected to MongoDB >= 3.4.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\collection.py", - "ast_data": "FunctionDef name:rename arguments arg:self arg:new_name type:str arg:session type:Optional[ClientSession] arg:comment type:Optional[Any] kwarg:kwargs If Raise raises:TypeError(f'new_name must be an instance of str, not {type(new_name)}') If BoolOp Compare op:In Raise raises:InvalidName('collection names cannot be empty') If BoolOp Compare op:Eq Compare op:Eq Raise raises:InvalidName(\"collection names must not start or end with '.'\") If BoolOp Compare op:In Raise raises:InvalidName(\"collection names must not contain '$'\") Assign Assign If Compare op:IsNot Assign Assign Call call:_write_concern_for_cmd With With Return return:yes" - }, - { - "library": "pytorch", - "name": "set_device", - "source_code": "def set_device(device: _device_t) -> None: pass", - "docstring": "Sets the current device, in CPU we do nothing. N.B. This function only exists to facilitate device-agnostic code", - "type": "function", - "file_path": "pytorch\\torch\\cpu\\__init__.py", - "ast_data": "FunctionDef name:set_device arguments arg:device type:_device_t" - }, - { - "library": "mongo", - "name": "writelines", - "source_code": "def writelines(self, sequence: Iterable[Any]) -> None: for line in sequence: self.write(line)", - "docstring": "Write a sequence of strings to the file. 
Does not add separators.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:writelines arguments arg:self arg:sequence type:Iterable[Any] For" - }, - { - "library": "tensorflow", - "name": "get_next", - "source_code": "def get_next(self, device = None): if device is not None: index = self._devices.index(device) return self._device_iterators[index].get_next() result = [] for i, device in enumerate(self._devices): with ops.device(device): result.append(self._device_iterators[i].get_next()) return result", - "docstring": "Returns the next element given a , else returns all in a list.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\multi_device_iterator_ops.py", - "ast_data": "FunctionDef name:get_next arguments arg:self arg:device If Compare op:IsNot Assign Call call:index Return return:yes Assign For Call call:enumerate With Return return:yes" - }, - { - "library": "pytorch", - "name": "create_name", - "source_code": "def create_name(self, candidate: str, obj: Optional[Any]) -> str: if obj is not None and obj in self._obj_to_name: return self._obj_to_name[obj] match = _name_regex.match(candidate) if match is None: candidate = _illegal_char_regex.sub('_', candidate) if not candidate: candidate = '_unnamed' if candidate[0].isdigit(): candidate = f'_{candidate}' match = _name_regex.match(candidate) assert match is not None base, num = match.group(1, 2) if num is None or candidate in self._used_names: num = self._base_count.get(candidate, 0) if _illegal_names.get(candidate, obj) is not obj: num += 1 candidate = f'{base}_{num}' else: num = int(num) while candidate in self._used_names: num += 1 candidate = f'{base}_{num}' self._used_names.add(candidate) self._base_count[base] = num if obj is not None: self._obj_to_name[obj] = candidate return candidate", - "docstring": "Create a unique name. Arguments: candidate: used as the basis for the unique name, relevant to the user. 
obj: If not None, an object that will be associated with the unique name.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\graph.py", - "ast_data": "FunctionDef name:create_name arguments arg:self arg:candidate type:str arg:obj type:Optional[Any] If BoolOp Compare op:IsNot Compare op:In Return return:yes Assign Call call:match If Compare op:Is Assign Call call:sub If Assign If Call call:isdigit Assign Assign Call call:match Assign Call call:group If BoolOp Compare op:Is Compare op:In Assign Call call:get If Compare op:IsNot Assign Assign Call call:int While Compare op:In Assign Assign If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "ones_like_impl", - "source_code": "def ones_like_impl(tensor, dtype, name, optimize = True, layout = None): with ops.name_scope(name, 'ones_like', [tensor]) as name: tensor = ops.convert_to_tensor(tensor, name = 'tensor') ones_shape = shape_internal(tensor, optimize = optimize) if dtype is None: dtype = tensor.dtype ret = ones(ones_shape, dtype = dtype, name = name, layout = layout) if not context.executing_eagerly(): ret.set_shape(tensor.get_shape()) return ret", - "docstring": "Internal implementation for the v1/v2 ones_like API calls.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", - "ast_data": "FunctionDef name:ones_like_impl arguments arg:tensor arg:dtype arg:name arg:optimize arg:layout With Assign Call call:convert_to_tensor Assign Call call:shape_internal If Compare op:Is Assign Assign Call call:ones If Return return:yes" - }, - { - "library": "django", - "name": "adapt_method_mode", - "source_code": "def adapt_method_mode(self, is_async, method, method_is_async = None, debug = False, name = None): if method_is_async is None: method_is_async = iscoroutinefunction(method) if debug and (not name): name = name or 'method %s()' % method.__qualname__ if is_async: if not method_is_async: if debug: logger.debug('Synchronous handler adapted for %s.', name) return sync_to_async(method, thread_sensitive = True) elif method_is_async: if debug: logger.debug('Asynchronous handler adapted for %s.', name) return async_to_sync(method) return method", - "docstring": "Adapt a method to be in the correct \"mode\": - If is_async is False: - Synchronous methods are left alone - Asynchronous methods are wrapped with async_to_sync - If is_async is True: - Synchronous methods are wrapped with sync_to_async() - Asynchronous methods are left alone", - "type": "method", - "file_path": "django\\django\\core\\handlers\\base.py", - "ast_data": "FunctionDef name:adapt_method_mode arguments arg:self arg:is_async arg:method arg:method_is_async arg:debug arg:name If Compare op:Is Assign Call call:iscoroutinefunction If BoolOp Assign BoolOp If If If Return return:yes If If Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "get_relations", - "source_code": "def get_relations(self, cursor, table_name): table_name = table_name.upper() cursor.execute('\\n SELECT ca.column_name, cb.table_name, cb.column_name\\n FROM user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb\\n WHERE user_constraints.table_name = %s AND\\n user_constraints.constraint_name = ca.constraint_name AND\\n user_constraints.r_constraint_name = cb.constraint_name AND\\n ca.position = cb.position', [table_name]) return {self.identifier_converter(field_name): (self.identifier_converter(rel_field_name), self.identifier_converter(rel_table_name)) for field_name, rel_table_name, rel_field_name in 
cursor.fetchall()}", - "docstring": "Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table.", - "type": "method", - "file_path": "django\\django\\db\\backends\\oracle\\introspection.py", - "ast_data": "FunctionDef name:get_relations arguments arg:self arg:cursor arg:table_name Assign Call call:upper Return return:yes" - }, - { - "library": "pytorch", - "name": "get_growth_factor", - "source_code": "def get_growth_factor(self) -> float: return self._growth_factor", - "docstring": "Return a Python float containing the scale growth factor.", - "type": "method", - "file_path": "pytorch\\torch\\amp\\grad_scaler.py", - "ast_data": "FunctionDef name:get_growth_factor arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "get_alternative_name", - "source_code": "def get_alternative_name(self, file_root, file_ext): return '%s_%s%s' % (file_root, get_random_string(7), file_ext)", - "docstring": "Return an alternative filename, by adding an underscore and a random 7 character alphanumeric string (before the file extension, if one exists) to the filename.", - "type": "method", - "file_path": "django\\django\\core\\files\\storage\\base.py", - "ast_data": "FunctionDef name:get_alternative_name arguments arg:self arg:file_root arg:file_ext Return return:yes" - }, - { - "library": "algorithms", - "name": "prime_check", - "source_code": "def prime_check(num): if num <= 1: return False if num == 2 or num == 3: return True if num % 2 == 0 or num % 3 == 0: return False j = 5 while j * j <= num: if num % j == 0 or num % (j + 2) == 0: return False j += 6 return True", - "docstring": "Return True if num is a prime number Else return False.", - "type": "function", - "file_path": "algorithms\\algorithms\\maths\\diffie_hellman_key_exchange.py", - "ast_data": "FunctionDef name:prime_check arguments arg:num If Compare op:LtE Return return:yes If BoolOp Compare op:Eq Compare op:Eq Return return:yes If BoolOp Compare op:Eq Compare op:Eq Return return:yes Assign While Compare op:LtE If BoolOp Compare op:Eq Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "post", - "source_code": "def post(self, request, *args, **kwargs): form = self.get_form() if form.is_valid(): return self.form_valid(form) else: return self.form_invalid(form)", - "docstring": "Handle POST requests: instantiate a form instance with the passed POST variables and then check if it's valid.", - "type": "method", - "file_path": "django\\django\\views\\generic\\edit.py", - "ast_data": "FunctionDef name:post arguments arg:self arg:request vararg:args kwarg:kwargs Assign Call call:get_form If Call call:is_valid Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, path, *, zs = (), zdir = 'z', axlim_clip = False, **kwargs): Patch.__init__(self, **kwargs) self.set_3d_properties(path, zs, zdir, axlim_clip)", - "docstring": "Parameters ---------- path : zs : float The location along the *zdir* axis in 3D space to position the path patch. zdir : {'x', 'y', 'z', 3-tuple} Plane to plot path patch orthogonal to. Default: 'z'. See for a description of the values. axlim_clip : bool, default: False Whether to hide path patches with a point outside the axes view limits. .. 
versionadded:: 3.10", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:path kwarg:kwargs" - }, - { - "library": "kornia", - "name": "forward", - "source_code": "@torch.inference_mode() def forward(self, images: Union[Tensor, list[Tensor]]) -> Union[Tensor, list[Tensor]]: images, images_sizes = self.pre_processor(images) logits, boxes = self.model(images) detections = self.post_processor(logits, boxes, images_sizes) return detections", - "docstring": "Detect objects in a given list of images. Args: images: If list of RGB images. Each image is a Tensor with shape :math:. If Tensor, a Tensor with shape :math:. Returns: list of detections found in each image. For item in a batch, shape is :math:, where :math: is the number of detections in the given image, :math: represents class id, score, and bounding box.", - "type": "method", - "file_path": "kornia\\kornia\\models\\detection\\base.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:images type:Union[Tensor, list[Tensor]] Call call:inference_mode Assign Call call:pre_processor Assign Call call:model Assign Call call:post_processor Return return:yes" - }, - { - "library": "tensorflow", - "name": "serialize", - "source_code": "def serialize(metric): return serialize_keras_object(metric)", - "docstring": "Serializes metric function or instance. Args: metric: A Keras instance or a metric function. Returns: Metric configuration dictionary.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", - "ast_data": "FunctionDef name:serialize arguments arg:metric Return return:yes" - }, - { - "library": "kornia", - "name": "forward_with_coords", - "source_code": "def forward_with_coords(self, coords_input: Tensor, image_size: tuple[int, int]) -> Tensor: coords = coords_input.clone() coords[:, :, 0] = coords[:, :, 0] / image_size[1] coords[:, :, 1] = coords[:, :, 1] / image_size[0] return self._pe_encoding(coords.to(torch.float32))", - "docstring": "Positionally encode points that are not normalized to [0,1].", - "type": "method", - "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\prompt_encoder.py", - "ast_data": "FunctionDef name:forward_with_coords arguments arg:self arg:coords_input type:Tensor arg:image_size type:tuple[int, int] Assign Call call:clone Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "manual_seed_all", - "source_code": "def manual_seed_all(seed: int) -> None: seed = int(seed) def cb(): for i in range(device_count()): default_generator = torch.cuda.default_generators[i] default_generator.manual_seed(seed) _lazy_call(cb, seed_all = True)", - "docstring": "Set the seed for generating random numbers on all GPUs. It's safe to call this function if CUDA is not available; in that case, it is silently ignored. 
Args: seed (int): The desired seed.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\random.py", - "ast_data": "FunctionDef name:manual_seed_all arguments arg:seed type:int Assign Call call:int FunctionDef name:cb arguments For Call call:range Assign" - }, - { - "library": "tensorflow", - "name": "output_shape", - "source_code": "@property def output_shape(self): if not self._inbound_nodes: raise AttributeError('The layer has never been called and thus has no defined output shape.') all_output_shapes = set([str(node.output_shapes) for node in self._inbound_nodes]) if len(all_output_shapes) == 1: return self._inbound_nodes[0].output_shapes else: raise AttributeError('The layer \"%s\" has multiple inbound nodes, with different output shapes. Hence the notion of \"output shape\" is ill-defined for the layer. Use `get_output_shape_at(node_index)` instead.' % self.name)", - "docstring": "Retrieves the output shape(s) of a layer. Only applicable if the layer has one output, or if all outputs have the same shape. Returns: Output shape, as an integer shape tuple (or list of shape tuples, one tuple per output tensor). Raises: AttributeError: if the layer has no defined output shape. RuntimeError: if called in Eager mode.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", - "ast_data": "FunctionDef name:output_shape arguments arg:self If Raise raises:AttributeError('The layer has never been called and thus has no defined output shape.') Assign Call call:set If Compare op:Eq Return return:yes Raise raises:AttributeError('The layer \"%s\" has multiple inbound nodes, with different output shapes. Hence the notion of \"output shape\" is ill-defined for the layer. Use `get_output_shape_at(node_index)` instead.' % self.name)" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, bounds, transform): self._bounds = bounds self._transform = transform", - "docstring": "*bounds* (a `` rectangle) and *transform* together specify the position of the inset Axes.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:bounds arg:transform Assign Assign" - }, - { - "library": "matplotlib", - "name": "get_box_aspect", - "source_code": "def get_box_aspect(self): return self._box_aspect", - "docstring": "Return the Axes box aspect, i.e. the ratio of height to width. The box aspect is `` (i.e. chosen depending on the available figure space) unless explicitly specified. See Also -------- matplotlib.axes.Axes.set_box_aspect for a description of box aspect. 
matplotlib.axes.Axes.set_aspect for a description of aspect handling.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:get_box_aspect arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "initialize", - "source_code": "def initialize(self, table): check_table_dtypes(table, self.key_dtype, self.value_dtype) with ops.name_scope(self._name, 'text_file_init', (table.resource_handle,)): filename = ops.convert_to_tensor(self._filename, dtypes.string, name = 'asset_filepath') init_op = gen_lookup_ops.initialize_table_from_text_file_v2(table.resource_handle, filename, self._key_index, self._value_index, -1 if self._vocab_size is None else self._vocab_size, self._delimiter, self._offset) ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op) if not context.executing_eagerly() and constant_op.is_constant(filename): ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, filename) return init_op", - "docstring": "Initializes the table from a text file. Args: table: The table to be initialized. Returns: The operation that initializes the table. Raises: TypeError: when the keys and values data types do not match the table key and value data types.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", - "ast_data": "FunctionDef name:initialize arguments arg:self arg:table With Assign Call call:convert_to_tensor Assign Call call:initialize_table_from_text_file_v2 If BoolOp Call call:is_constant Return return:yes" - }, - { - "library": "tensorflow", - "name": "minimum", - "source_code": "@property def minimum(self): return self._minimum", - "docstring": "Returns a NumPy array specifying the minimum bounds (inclusive).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", - "ast_data": "FunctionDef name:minimum arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "count_masked", - "source_code": "def count_masked(arr, axis = None): m = getmaskarray(arr) return m.sum(axis)", - "docstring": "Count the number of masked elements along the given axis. Parameters ---------- arr : array_like An array with (possibly) masked elements. axis : int, optional Axis along which to count. If None (default), a flattened version of the array is used. Returns ------- count : int, ndarray The total number of masked elements (axis=None) or the number of masked elements along each slice of the given axis. See Also -------- MaskedArray.count : Count non-masked elements. Examples -------- >>> import numpy as np >>> a = np.arange(9).reshape((3,3)) >>> a = np.ma.array(a) >>> a[1, 0] = np.ma.masked >>> a[1, 2] = np.ma.masked >>> a[2, 1] = np.ma.masked >>> a masked_array( data=[[0, 1, 2], [--, 4, --], [6, --, 8]], mask=[[False, False, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> np.ma.count_masked(a) 3 When the keyword is used an array is returned. 
>>> np.ma.count_masked(a, axis=0) array([1, 1, 1]) >>> np.ma.count_masked(a, axis=1) array([0, 2, 1])", - "type": "function", - "file_path": "numpy\\numpy\\ma\\extras.py", - "ast_data": "FunctionDef name:count_masked arguments arg:arr arg:axis Assign Call call:getmaskarray Return return:yes" - }, - { - "library": "pytorch", - "name": "normal_", - "source_code": "def normal_(tensor: Tensor, mean: float = 0.0, std: float = 1.0, generator: _Optional[torch.Generator] = None) -> Tensor: if torch.overrides.has_torch_function_variadic(tensor): return torch.overrides.handle_torch_function(normal_, (tensor,), tensor = tensor, mean = mean, std = std, generator = generator) return _no_grad_normal_(tensor, mean, std, generator)", - "docstring": "Fill the input Tensor with values drawn from the normal distribution. :math:. Args: tensor: an n-dimensional mean: the mean of the normal distribution std: the standard deviation of the normal distribution generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.normal_(w)", - "type": "function", - "file_path": "pytorch\\torch\\nn\\init.py", - "ast_data": "FunctionDef name:normal_ arguments arg:tensor type:Tensor arg:mean type:float arg:std type:float arg:generator type:_Optional[torch.Generator] If Call call:has_torch_function_variadic Return return:yes Return return:yes" - }, - { - "library": "scrapy", - "name": "request_httprepr", - "source_code": "def request_httprepr(request: Request) -> bytes: parsed = urlparse_cached(request) path = urlunparse(('', '', parsed.path or '/', parsed.params, parsed.query, '')) s = to_bytes(request.method) + b' ' + to_bytes(path) + b' HTTP/1.1\\r\\n' s += b'Host: ' + to_bytes(parsed.hostname or b'') + b'\\r\\n' if request.headers: s += request.headers.to_string() + b'\\r\\n' s += b'\\r\\n' s += request.body return s", - "docstring": "Return the raw HTTP representation (as bytes) of the given request. This is provided only for reference since it's not the actual stream of bytes that will be send when performing the request (that's controlled by Twisted).", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\request.py", - "ast_data": "FunctionDef name:request_httprepr arguments arg:request type:Request Assign Call call:urlparse_cached Assign Call call:urlunparse Assign If Return return:yes" - }, - { - "library": "numpy", - "name": "masked_greater", - "source_code": "def masked_greater(x, value, copy = True): return masked_where(greater(x, value), x, copy = copy)", - "docstring": "Mask an array where greater than a given value. This function is a shortcut to `condition` = (x > value). See Also -------- masked_where : Mask where a condition is met. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = np.arange(4) >>> a array([0, 1, 2, 3]) >>> ma.masked_greater(a, 2) masked_array(data=[0, 1, 2, --], mask=[False, False, False, True], fill_value=999999)", - "type": "function", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:masked_greater arguments arg:x arg:value arg:copy Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "def fit(self, X, y = None): self.fit_transform(X) return self", - "docstring": "Fit model on training data X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : Ignored Not used, present here for API consistency by convention. 
Returns ------- self : object Returns the transformer object.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_truncated_svd.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Return return:yes" - }, - { - "library": "tensorflow", - "name": "deserialize", - "source_code": "def deserialize(config, custom_objects = None): populate_deserializable_objects() return generic_utils.deserialize_keras_object(config, module_objects = LOCAL.ALL_OBJECTS, custom_objects = custom_objects, printable_module_name = 'layer')", - "docstring": "Instantiates a layer from a config dictionary. Args: config: dict of the form {'class_name': str, 'config': dict} custom_objects: dict mapping class names (or function names) of custom (non-Keras) objects to class/functions Returns: Layer instance (may be Model, Sequential, Network, Layer...)", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\serialization.py", - "ast_data": "FunctionDef name:deserialize arguments arg:config arg:custom_objects Return return:yes" - }, - { - "library": "django", - "name": "FallbackStorage", - "source_code": "class FallbackStorage(BaseStorage): storage_classes = (CookieStorage, SessionStorage) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.storages = [storage_class(*args, **kwargs) for storage_class in self.storage_classes] self._used_storages = set() def _get(self, *args, **kwargs): all_messages = [] for storage in self.storages: messages, all_retrieved = storage._get() if messages is None: break if messages: self._used_storages.add(storage) all_messages.extend(messages) if all_retrieved: break return (all_messages, all_retrieved) def _store(self, messages, response, *args, **kwargs): for storage in self.storages: if messages: messages = storage._store(messages, response, remove_oldest = False) elif storage in self._used_storages: storage._store([], response) self._used_storages.remove(storage) return messages", - "docstring": "Try to store all messages in the first backend. Store any unstored messages in each subsequent backend.", - "type": "class", - "file_path": "django\\django\\contrib\\messages\\storage\\fallback.py", - "ast_data": "ClassDef name:FallbackStorage Assign FunctionDef name:__init__ arguments arg:self vararg:args kwarg:kwargs Assign Assign Call call:set FunctionDef name:_get arguments arg:self vararg:args kwarg:kwargs Assign For Assign Call call:_get If Compare op:Is If If Return return:yes FunctionDef name:_store arguments arg:self arg:messages arg:response vararg:args kwarg:kwargs For If Assign Call call:_store If Compare op:In Return return:yes" - }, - { - "library": "mongo", - "name": "__init__", - "source_code": "def __init__(self, data: Union[_OpMsg, _OpReply], address: _Address, request_id: int, duration: Optional[timedelta], from_command: bool, docs: Sequence[Mapping[str, Any]]): self._data = data self._address = address self._request_id = request_id self._duration = duration self._from_command = from_command self._docs = docs", - "docstring": "Represent a response from the server. :param data: A network response message. :param address: (host, port) of the source server. :param request_id: The request id of this operation. :param duration: The duration of the operation. 
:param from_command: if the response is the result of a db command.", - "type": "method", - "file_path": "mongo\\pymongo\\response.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:data type:Union[_OpMsg, _OpReply] arg:address type:_Address arg:request_id type:int arg:duration type:Optional[timedelta] arg:from_command type:bool arg:docs type:Sequence[Mapping[str, Any]] Assign Assign Assign Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "unsqueeze", - "source_code": "@_onnx_symbolic('aten: : unsqueeze') @symbolic_helper.parse_args('v', 'i') def unsqueeze(g: jit_utils.GraphContext, self, dim): if dim < 0: rank = symbolic_helper._get_tensor_rank(self) if rank is not None: warnings.warn('ONNX export unsqueeze with negative axis ' + str(dim) + ' might cause the onnx model to be incorrect. ' + 'Negative axis is not supported in ONNX. ' + 'Axis is converted to ' + str(dim + rank + 1) + ' based on input shape at export time. ' + 'Passing an tensor of different rank in execution will be incorrect.') dim = dim + rank + 1 else: return symbolic_helper._unimplemented('unsqueeze', 'negative axis with unknown input rank', self) return symbolic_helper._unsqueeze_helper(g, self, axes_i = [dim])", - "docstring": "Implement unsqueezing a pytorch tensor in ONNX by inserting a new dimension at the specified", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", - "ast_data": "FunctionDef name:unsqueeze arguments arg:g type:jit_utils.GraphContext arg:self arg:dim Call call:_onnx_symbolic Call call:parse_args If Compare op:Lt Assign Call call:_get_tensor_rank If Compare op:IsNot Assign Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "vec_like", - "source_code": "def vec_like(n: int, tensor: Tensor, shared_memory: bool = False) -> Tensor: if n < = 0: raise AssertionError(type(n), n) if len(tensor.shape) < 1: raise AssertionError(tensor.shape) vec = zeros(n, 1, device = tensor.device, dtype = tensor.dtype) return vec[None].expand(tensor.shape[0], n, 1) if shared_memory else vec[None].repeat(tensor.shape[0], 1, 1)", - "docstring": "Return a 2-D tensor with a vector containing zeros with the same batch size as the input. Args: n: the number of rows :math:. tensor: image tensor that will determine the batch size of the output matrix. The expected shape is :math:. shared_memory: when set, all samples in the batch will share the same memory. Returns: The vector with the same batch size as the input :math:. Notes: When the dimension to expand is of size 1, using torch.expand(...) yields the same tensor as torch.repeat(...) without using extra memory. Thus, when the tensor obtained by this method will be later assigned - use this method with shared_memory=False, otherwise, prefer using it with shared_memory=True.", - "type": "function", - "file_path": "kornia\\kornia\\utils\\misc.py", - "ast_data": "FunctionDef name:vec_like arguments arg:n type:int arg:tensor type:Tensor arg:shared_memory type:bool If Compare op:LtE Raise raises:AssertionError(type(n), n) If Compare op:Lt Raise raises:AssertionError(tensor.shape) Assign Call call:zeros Return return:yes" - }, - { - "library": "tensorflow", - "name": "CodeObjectCache", - "source_code": "class CodeObjectCache(_TransformedFnCache): def _get_key(self, entity): if hasattr(entity, '__code__'): return entity.__code__ else: return entity", - "docstring": "A function cache based on code objects. Code objects are good proxies for the source code of a function. 
This cache efficiently handles functions that share code objects, such as functions defined in a loop, bound methods, etc. The cache falls back to the function object, if it doesn't have a code object.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cache.py", - "ast_data": "ClassDef name:CodeObjectCache FunctionDef name:_get_key arguments arg:self arg:entity If Call call:hasattr Return return:yes Return return:yes" - }, - { - "library": "authlib", - "name": "validate_claims_supported", - "source_code": "def validate_claims_supported(self): validate_array_value(self, 'claims_supported')", - "docstring": "RECOMMENDED. JSON array containing a list of the Claim Names of the Claims that the OpenID Provider MAY be able to supply values for. Note that for privacy or other reasons, this might not be an exhaustive list.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", - "ast_data": "FunctionDef name:validate_claims_supported arguments arg:self" - }, - { - "library": "numpy", - "name": "append", - "source_code": "def append(a, b, axis = None): return concatenate([a, b], axis)", - "docstring": "Append values to the end of an array. Parameters ---------- a : array_like Values are appended to a copy of this array. b : array_like These values are appended to a copy of . It must be of the correct shape (the same shape as , excluding ). If is not specified, can be any shape and will be flattened before use. axis : int, optional The axis along which are appended. If is not given, both and are flattened before use. Returns ------- append : MaskedArray A copy of with appended to . Note that does not occur in-place: a new array is allocated and filled. If is None, the result is a flattened array. See Also -------- numpy.append : Equivalent function in the top-level NumPy module. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_values([1, 2, 3], 2) >>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7) >>> ma.append(a, b) masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9], mask=[False, True, False, False, False, False, True, False, False], fill_value=999999)", - "type": "function", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:append arguments arg:a arg:b arg:axis Return return:yes" - }, - { - "library": "numpy", - "name": "english_lower", - "source_code": "def english_lower(s): lowered = s.translate(LOWER_TABLE) return lowered", - "docstring": "Apply English case rules to convert ASCII strings to all lower case. This is an internal utility function to replace calls to str.lower() such that we can avoid changing behavior with changing locales. In particular, Turkish has distinct dotted and dotless variants of the Latin letter \"I\" in both lowercase and uppercase. Thus, \"I\".lower() != \"i\" in a \"tr\" locale. 
Parameters ---------- s : str Returns ------- lowered : str Examples -------- >>> from numpy._core.numerictypes import english_lower >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_') 'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_' >>> english_lower('') ''", - "type": "function", - "file_path": "numpy\\numpy\\_core\\_string_helpers.py", - "ast_data": "FunctionDef name:english_lower arguments arg:s Assign Call call:translate Return return:yes" - }, - { - "library": "tensorflow", - "name": "put", - "source_code": "def put(self, closure, tag = None): closure.tag = tag if tag is not None: with self._queue_lock: self._tagged_queue[tag].put(closure, block = False) self._closures_queued_condition.notify_all() else: with self._put_wait_lock, self._queue_lock: self._queue_free_slot_condition.wait_for(lambda: not self._queue.full()) self._queue.put(closure, block = False) metric_utils.monitor_int('queued_closures', self._queue.qsize()) self._raise_if_error() self._closures_queued_condition.notify()", - "docstring": "Put a closure into the queue for later execution. If was called before , the error from the first invocation of will be raised. Args: closure: The to put into the queue. tag: if not None, put into a queue with the given tag.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", - "ast_data": "FunctionDef name:put arguments arg:self arg:closure arg:tag Assign If Compare op:IsNot With With" - }, - { - "library": "matplotlib", - "name": "on_clicked", - "source_code": "def on_clicked(self, func): return self._observers.connect('clicked', func)", - "docstring": "Connect the callback function *func* to button click events. Parameters ---------- func : callable When the button is clicked, call *func* with button label. When all buttons are cleared, call *func* with None. The callback func must have the signature:: def func(label: str | None) -> Any Return values may exist, but are ignored. Returns ------- A connection id, which can be used to disconnect the callback.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", - "ast_data": "FunctionDef name:on_clicked arguments arg:self arg:func Return return:yes" - }, - { - "library": "flexx", - "name": "get_resoure_path", - "source_code": "def get_resoure_path(filename): dest = os.path.abspath(os.path.join(__file__, '..', '..', 'resources')) if not os.path.isdir(dest): raise ValueError('Resource dest dir %r is not a directory.' % dest) path = os.path.join(dest, filename) url = '' if filename in RESOURCES: url, tag = RESOURCES[filename] if tag: url = url.replace('{}', tag) basename, ext = path.rsplit('.', 1) path = basename + '.' + tag + '.' + ext if not os.path.isfile(path): data = _fetch_file(url) with open(path, 'wb') as f: f.write(data) elif not os.path.isfile(path): raise ValueError('Unknown/unavailable resource %r' % filename) return path", - "docstring": "Get the full path to a resource, corresponding to the given filename. Will use cached version if available. Otherwise will download and cache.", - "type": "function", - "file_path": "flexx\\flexx\\util\\getresource.py", - "ast_data": "FunctionDef name:get_resoure_path arguments arg:filename Assign Call call:abspath If Raise raises:ValueError('Resource dest dir %r is not a directory.' 
% dest) Assign Call call:join Assign If Compare op:In Assign If Assign Call call:replace Assign Call call:rsplit Assign If Assign Call call:_fetch_file With If Raise raises:ValueError('Unknown/unavailable resource %r' % filename) Return return:yes" - }, - { - "library": "pandas", - "name": "validate_putmask", - "source_code": "def validate_putmask(values: ArrayLike | MultiIndex, mask: np.ndarray) -> tuple[npt.NDArray[np.bool_], bool]: mask = extract_bool_array(mask) if mask.shape ! = values.shape: raise ValueError('putmask: mask and data must be the same size') noop = not mask.any() return (mask, noop)", - "docstring": "Validate mask and check if this putmask operation is a no-op.", - "type": "function", - "file_path": "pandas\\pandas\\core\\array_algos\\putmask.py", - "ast_data": "FunctionDef name:validate_putmask arguments arg:values type:ArrayLike | MultiIndex arg:mask type:np.ndarray Assign Call call:extract_bool_array If Compare op:NotEq Raise raises:ValueError('putmask: mask and data must be the same size') Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_backend_config", - "source_code": "def get_backend_config(group: Optional[ProcessGroup] = None) -> str: pg = group or _get_default_group() if _rank_not_in_group(pg): raise ValueError('Invalid process group specified') backend_config = _world.pg_backend_config.get(pg) return str(not_none(backend_config))", - "docstring": "Return the backend configuration of the given process group. Args: group (ProcessGroup, optional): The process group to work on. The default is the general main process group. If another specific group is specified, the calling process must be part of :attr:. Returns: The backend configuration of the given process group as a lower case string.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:get_backend_config arguments arg:group type:Optional[ProcessGroup] Assign BoolOp Call call:_get_default_group If Call call:_rank_not_in_group Raise raises:ValueError('Invalid process group specified') Assign Call call:get Return return:yes" - }, - { - "library": "numpy", - "name": "shape", - "source_code": "@array_function_dispatch(_shape_dispatcher) def shape(a): try: result = a.shape except AttributeError: result = asarray(a).shape return result", - "docstring": "Return the shape of an array. Parameters ---------- a : array_like Input array. Returns ------- shape : tuple of ints The elements of the shape tuple give the lengths of the corresponding array dimensions. See Also -------- len : ``. ndarray.shape : Equivalent array method. Examples -------- >>> import numpy as np >>> np.shape(np.eye(3)) (3, 3) >>> np.shape([[1, 3]]) (1, 2) >>> np.shape([0]) (1,) >>> np.shape(0) () >>> a = np.array([(1, 2), (3, 4), (5, 6)], ... 
dtype=[('x', 'i4'), ('y', 'i4')]) >>> np.shape(a) (3,) >>> a.shape (3,)", - "type": "function", - "file_path": "numpy\\numpy\\_core\\fromnumeric.py", - "ast_data": "FunctionDef name:shape arguments arg:a Call call:array_function_dispatch Try Assign ExceptHandler Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "composition_to_dims", - "source_code": "def composition_to_dims(composition: Sequence[Union[list[Union[str, AnonymousAxis]], str]]) -> list[Union[str, tuple[str, ...]]]: dim_composition: list[Union[str, tuple[str, ...]]] = [] for dimension in composition: if isinstance(dimension, list): dim_composition.append(tuple((dim for identifier in dimension for dim in identifier_dim_map[identifier]))) elif dimension = = _ellipsis: dim_composition.extend(identifier_dim_map[_ellipsis]) else: raise ValueError(f'Unexpected dimension: {dimension}') return dim_composition", - "docstring": "Convert a into a index of strings representing first class dims.", - "type": "function", - "file_path": "pytorch\\functorch\\einops\\rearrange.py", - "ast_data": "FunctionDef name:composition_to_dims arguments arg:composition type:Sequence[Union[list[Union[str, AnonymousAxis]], str]] For If Call call:isinstance If Compare op:Eq Raise raises:ValueError(f'Unexpected dimension: {dimension}') Return return:yes" - }, - { - "library": "sphinx", - "name": "InventoryFileReader", - "source_code": "class InventoryFileReader: def __init__(self, stream: _SupportsRead) -> None: self.stream = stream self.buffer = b'' self.eof = False def read_buffer(self) -> None: chunk = self.stream.read(BUFSIZE) if chunk = = b'': self.eof = True self.buffer + = chunk def readline(self) -> str: pos = self.buffer.find(b'\\n') if pos ! = -1: line = self.buffer[: pos].decode() self.buffer = self.buffer[pos + 1:] elif self.eof: line = self.buffer.decode() self.buffer = b'' else: self.read_buffer() line = self.readline() return line def readlines(self) -> Iterator[str]: while not self.eof: line = self.readline() if line: yield line def read_compressed_chunks(self) -> Iterator[bytes]: decompressor = zlib.decompressobj() while not self.eof: self.read_buffer() yield decompressor.decompress(self.buffer) self.buffer = b'' yield decompressor.flush() def read_compressed_lines(self) -> Iterator[str]: buf = b'' for chunk in self.read_compressed_chunks(): buf + = chunk pos = buf.find(b'\\n') while pos ! = -1: yield buf[: pos].decode() buf = buf[pos + 1:] pos = buf.find(b'\\n')", - "docstring": "A file reader for an inventory file. 
This reader supports mixture of texts and compressed texts.", - "type": "class", - "file_path": "sphinx\\sphinx\\util\\_inventory_file_reader.py", - "ast_data": "ClassDef name:InventoryFileReader FunctionDef name:__init__ arguments arg:self arg:stream type:_SupportsRead Assign Assign Assign FunctionDef name:read_buffer arguments arg:self Assign Call call:read If Compare op:Eq Assign FunctionDef name:readline arguments arg:self Assign Call call:find If Compare op:NotEq Assign Call call:decode Assign If Assign Call call:decode Assign Assign Call call:readline Return return:yes FunctionDef name:readlines arguments arg:self While Assign Call call:readline If FunctionDef name:read_compressed_chunks arguments arg:self Assign Call call:decompressobj While Assign FunctionDef name:read_compressed_lines arguments arg:self Assign For Call call:read_compressed_chunks Assign Call call:find While Compare op:NotEq Assign Assign Call call:find" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, additional_note = '', kwargs_dict = None): self._additional_note = additional_note if kwargs_dict: bullets = [] for key in sorted(kwargs_dict.keys()): value = kwargs_dict[key] if any((x.isspace() for x in key)): raise ValueError('Parameter name \"%s\" contains whitespace.' % key) value = value.lstrip() if '\\n' in value: raise ValueError('Parameter description for \"%s\" contains newlines.' % key) bullets.append('* `%s`: %s' % (key, value)) self._additional_note + = '\\n\\n##### `kwargs`: \\n\\n' + '\\n'.join(bullets)", - "docstring": "Initializes the AppendDocstring object. Args: additional_note: Python string added as additional docstring to public version of function. kwargs_dict: Python string/string dictionary representing specific kwargs expanded from the **kwargs input. Raises: ValueError: if kwargs_dict.key contains whitespace. ValueError: if kwargs_dict.value contains newlines.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\util.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:additional_note arg:kwargs_dict Assign If Assign For Call call:sorted Assign If Call call:any Raise raises:ValueError('Parameter name \"%s\" contains whitespace.' % key) Assign Call call:lstrip If Compare op:In Raise raises:ValueError('Parameter description for \"%s\" contains newlines.' % key)" - }, - { - "library": "pandas", - "name": "dtypes", - "source_code": "@property def dtypes(self) -> Series: from pandas import Index, Series pa_type = self._data.dtype.pyarrow_dtype types = [ArrowDtype(struct.type) for struct in pa_type] names = [struct.name for struct in pa_type] return Series(types, index = Index(names))", - "docstring": "Return the dtype object of each child field of the struct. Returns ------- pandas.Series The data type of each child field. See Also -------- Series.dtype: Return the dtype object of the underlying data. Examples -------- >>> import pyarrow as pa >>> s = pd.Series( ... [ ... {\"version\": 1, \"project\": \"pandas\"}, ... {\"version\": 2, \"project\": \"pandas\"}, ... {\"version\": 1, \"project\": \"numpy\"}, ... ], ... dtype=pd.ArrowDtype( ... pa.struct([(\"version\", pa.int64()), (\"project\", pa.string())]) ... ), ... 
) >>> s.struct.dtypes version int64[pyarrow] project string[pyarrow] dtype: object", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\arrow\\accessors.py", - "ast_data": "FunctionDef name:dtypes arguments arg:self Assign Assign Assign Return return:yes" - }, - { - "library": "flexx", - "name": "get_component_classes", - "source_code": "def get_component_classes(): return [c for c in AppComponentMeta.CLASSES]", - "docstring": "Get a list of all known PyComponent and JsComponent subclasses.", - "type": "function", - "file_path": "flexx\\flexx\\app\\_component2.py", - "ast_data": "FunctionDef name:get_component_classes arguments Return return:yes" - }, - { - "library": "matplotlib", - "name": "line_collection_2d_to_3d", - "source_code": "def line_collection_2d_to_3d(col, zs = 0, zdir = 'z', axlim_clip = False): segments3d = _paths_to_3d_segments(col.get_paths(), zs, zdir) col.__class__ = Line3DCollection col.set_segments(segments3d) col._axlim_clip = axlim_clip", - "docstring": "Convert a to a object.", - "type": "function", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", - "ast_data": "FunctionDef name:line_collection_2d_to_3d arguments arg:col arg:zs arg:zdir arg:axlim_clip Assign Call call:_paths_to_3d_segments Assign Assign" - }, - { - "library": "matplotlib", - "name": "seq_id", - "source_code": "def seq_id(): return '%06d' % next(_layoutboxobjnum)", - "docstring": "Generate a short sequential id for layoutbox objects.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py", - "ast_data": "FunctionDef name:seq_id arguments Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_soft_device_placement", - "source_code": "@tf_export('__internal__.eager_context.set_soft_device_placement', v1 = []) def set_soft_device_placement(enabled): context().soft_device_placement = enabled", - "docstring": "Set if soft device placements should be allowed. Args: enabled: Whether to enable soft device placement.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:set_soft_device_placement arguments arg:enabled Call call:tf_export Assign" - }, - { - "library": "scipy", - "name": "reset", - "source_code": "def reset(self, n, has_jac): pass", - "docstring": "Prepare integrator for call: allocate memory, set flags, etc. n - number of equations. has_jac - if user has supplied function for evaluating Jacobian.", - "type": "method", - "file_path": "scipy\\scipy\\integrate\\_ode.py", - "ast_data": "FunctionDef name:reset arguments arg:self arg:n arg:has_jac" - }, - { - "library": "django", - "name": "geographic", - "source_code": "@property def geographic(self): return bool(capi.isgeographic(self.ptr))", - "docstring": "Return True if this SpatialReference is geographic (root node is GEOGCS).", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", - "ast_data": "FunctionDef name:geographic arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "BackwardState", - "source_code": "class BackwardState: proxy: torch.fx.Proxy", - "docstring": "BackwardState is used to pass Python hooks from the forwards pass into the backwards pass in Dynamo+Compiled Autograd. It is created by TorchDynamo and has special handling there. Dynamo will pass an empty BackwardState to the forwards, then populate members on it (via setattr) only after the forwards graph is finished. 
Later on, in CompileAutograd we will inline and add the needed guards on the BackwardState. BackwardState is identified and has special handling in AOTAutograd. During AOTAutograd: 1) BackwardState is an input to the forwards graph 2) It must only be used in the backwards 3) It will be empty in the forwards 4) In the forwards we add a wrapper to save it 5) In the backwards it becomes an input 6) There can only be one per graph BackwardState requires CompiledAutograd.", - "type": "class", - "file_path": "pytorch\\torch\\fx\\experimental\\_backward_state.py", - "ast_data": "ClassDef name:BackwardState" - }, - { - "library": "scipy", - "name": "expm_frechet_block_enlarge", - "source_code": "def expm_frechet_block_enlarge(A, E): n = A.shape[0] M = np.vstack([np.hstack([A, E]), np.hstack([np.zeros_like(A), A])]) expm_M = scipy.linalg.expm(M) return (expm_M[: n, : n], expm_M[: n, n:])", - "docstring": "This is a helper function, mostly for testing and profiling. Return expm(A), frechet(A, E)", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_expm_frechet.py", - "ast_data": "FunctionDef name:expm_frechet_block_enlarge arguments arg:A arg:E Assign Assign Call call:vstack Assign Call call:expm Return return:yes" - }, - { - "library": "numpy", - "name": "hermegauss", - "source_code": "def hermegauss(deg): ideg = pu._as_int(deg, 'deg') if ideg < = 0: raise ValueError('deg must be a positive integer') c = np.array([0] * deg + [1]) m = hermecompanion(c) x = la.eigvalsh(m) dy = _normed_hermite_e_n(x, ideg) df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg) x - = dy / df fm = _normed_hermite_e_n(x, ideg - 1) fm / = np.abs(fm).max() w = 1 / (fm * fm) w = (w + w[: : -1]) / 2 x = (x - x[: : -1]) / 2 w * = np.sqrt(2 * np.pi) / w.sum() return (x, w)", - "docstring": "Gauss-HermiteE quadrature. Computes the sample points and weights for Gauss-HermiteE quadrature. These sample points and weights will correctly integrate polynomials of degree :math: or less over the interval :math: with the weight function :math:. Parameters ---------- deg : int Number of sample points and weights. It must be >= 1. Returns ------- x : ndarray 1-D ndarray containing the sample points. y : ndarray 1-D ndarray containing the weights. Notes ----- The results have only been tested up to degree 100, higher degrees may be problematic. The weights are determined by using the fact that .. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k)) where :math: is a constant independent of :math: and :math: is the k'th root of :math:, and then scaling the results to get the right value when integrating 1.", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", - "ast_data": "FunctionDef name:hermegauss arguments arg:deg Assign Call call:_as_int If Compare op:LtE Raise raises:ValueError('deg must be a positive integer') Assign Call call:array Assign Call call:hermecompanion Assign Call call:eigvalsh Assign Call call:_normed_hermite_e_n Assign Assign Call call:_normed_hermite_e_n Assign Assign Assign Return return:yes" - }, - { - "library": "numpy", - "name": "correlate", - "source_code": "def correlate(a, v, mode = 'valid', propagate_mask = True): return _convolve_or_correlate(np.correlate, a, v, mode, propagate_mask)", - "docstring": "Cross-correlation of two 1-dimensional sequences. Parameters ---------- a, v : array_like Input sequences. mode : {'valid', 'same', 'full'}, optional Refer to the docstring. Note that the default is 'valid', unlike , which uses 'full'. 
propagate_mask : bool If True, then a result element is masked if any masked element contributes towards it. If False, then a result element is only masked if no non-masked element contribute towards it Returns ------- out : MaskedArray Discrete cross-correlation of and . See Also -------- numpy.correlate : Equivalent function in the top-level NumPy module. Examples -------- Basic correlation: >>> a = np.ma.array([1, 2, 3]) >>> v = np.ma.array([0, 1, 0]) >>> np.ma.correlate(a, v, mode='valid') masked_array(data=[2], mask=[False], fill_value=999999) Correlation with masked elements: >>> a = np.ma.array([1, 2, 3], mask=[False, True, False]) >>> v = np.ma.array([0, 1, 0]) >>> np.ma.correlate(a, v, mode='valid', propagate_mask=True) masked_array(data=[--], mask=[ True], fill_value=999999, dtype=int64) Correlation with different modes and mixed array types: >>> a = np.ma.array([1, 2, 3]) >>> v = np.ma.array([0, 1, 0]) >>> np.ma.correlate(a, v, mode='full') masked_array(data=[0, 1, 2, 3, 0], mask=[False, False, False, False, False], fill_value=999999)", - "type": "function", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:correlate arguments arg:a arg:v arg:mode arg:propagate_mask Return return:yes" - }, - { - "library": "kornia", - "name": "transform_keypoints_", - "source_code": "def transform_keypoints_(self, M: Tensor) -> 'Keypoints3D': return self.transform_keypoints(M, inplace = True)", - "docstring": "Inplace version of :func:.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\keypoints.py", - "ast_data": "FunctionDef name:transform_keypoints_ arguments arg:self arg:M type:Tensor Return return:yes" - }, - { - "library": "scipy", - "name": "ihilbert", - "source_code": "def ihilbert(x, _cache = _cache): if isinstance(_cache, threading.local): if not hasattr(_cache, 'ihilbert_cache'): _cache.ihilbert_cache = {} _cache = _cache.ihilbert_cache return -hilbert(x, _cache)", - "docstring": "Return inverse Hilbert transform of a periodic sequence x. 
If `` are Fourier coefficients of periodic functions x and y, respectively, then:: y_j = -sqrt(-1)*sign(j) * x_j y_0 = 0", - "type": "function", - "file_path": "scipy\\scipy\\fftpack\\_pseudo_diffs.py", - "ast_data": "FunctionDef name:ihilbert arguments arg:x arg:_cache If Call call:isinstance If Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "area", - "source_code": "@property def area(self): return capi.get_area(self.ptr)", - "docstring": "Return the area for a LinearRing, Polygon, or MultiPolygon; 0 otherwise.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", - "ast_data": "FunctionDef name:area arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "resolve_entity", - "source_code": "def resolve_entity(node, source, entity): lines, lineno = tf_inspect.getsourcelines(entity) filepath = tf_inspect.getsourcefile(entity) definition_line = lines[0] col_offset = len(definition_line) - len(definition_line.lstrip()) resolve(node, source, filepath, lineno, col_offset)", - "docstring": "Like resolve, but extracts the context information from an entity.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py", - "ast_data": "FunctionDef name:resolve_entity arguments arg:node arg:source arg:entity Assign Call call:getsourcelines Assign Call call:getsourcefile Assign Assign" - }, - { - "library": "tensorflow", - "name": "build_ring_all_reduce", - "source_code": "def build_ring_all_reduce(input_tensors, num_workers, num_subchunks, gpu_perm, red_op, un_op = None): if len(input_tensors) < 2: raise ValueError('input_tensors must be length 2 or longer') input_tensors, shape = _flatten_tensors(input_tensors) devices = [t.device for t in input_tensors] pred_by_s_d, rank_by_s_d = _ring_permutations(num_workers, num_subchunks, gpu_perm) chunks_by_dev, pad_len = _build_ring_gather(input_tensors, devices, num_subchunks, pred_by_s_d, rank_by_s_d, red_op) if un_op: chunks_by_dev = _apply_unary_to_chunks(un_op, chunks_by_dev) output_tensors = _build_ring_scatter(pred_by_s_d, rank_by_s_d, chunks_by_dev) if pad_len > 0: output_tensors = _strip_padding(output_tensors, pad_len) if len(shape) ! = 1: output_tensors = _reshape_tensors(output_tensors, shape) return output_tensors", - "docstring": "Construct a subgraph performing a ring-style all-reduce of input_tensors. Args: input_tensors: a list of objects, which must all have the same shape and type. num_workers: number of worker tasks spanned by input_tensors. num_subchunks: number of subchunks each device should process in one tick. gpu_perm: a list of ints giving a ring-wise rank ordering of GPUs at each worker. All workers must have the same number of GPUs with the same rank ordering. If NVLINK is available, this should be a ring order supported by NVLINK edges. red_op: a binary operator for elementwise reduction. un_op: an optional unary operator to apply to fully reduced values. Raises: ValueError: empty input_tensors or they don't all have same size. 
Returns: a list of identical sum-reductions of input_tensors.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", - "ast_data": "FunctionDef name:build_ring_all_reduce arguments arg:input_tensors arg:num_workers arg:num_subchunks arg:gpu_perm arg:red_op arg:un_op If Compare op:Lt Raise raises:ValueError('input_tensors must be length 2 or longer') Assign Call call:_flatten_tensors Assign Assign Call call:_ring_permutations Assign Call call:_build_ring_gather If Assign Call call:_apply_unary_to_chunks Assign Call call:_build_ring_scatter If Compare op:Gt Assign Call call:_strip_padding If Compare op:NotEq Assign Call call:_reshape_tensors Return return:yes" - }, - { - "library": "tensorflow", - "name": "auto_to_manual_spmd_partition", - "source_code": "def auto_to_manual_spmd_partition(tensor, manual_sharding, single_dim = -1, unspecified_dims = None): return tf2xla.spmd_full_to_shard_shape(tensor, manual_sharding = manual_sharding, dim = single_dim, unspecified_dims = unspecified_dims or [])", - "docstring": "Switches from automatic SPMD partitioning to manual partitioning. Converts a full-shaped tensor (to be automatically partitioned by SPMD partitioner) to a shard-shaped tensor to be consumed by manually partitioned ops. Args: tensor: A tf.Tensor in full shape. manual_sharding: A serialized string of OpSharding to be used in manual partitioning. single_dim: If >= 0, the conversion will happen only on this dim in subgroups. unspecified_dims: An optional list of dimensions unspecified. Returns: A shard-shaped tensor to be consumed by manually partitioned ops.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", - "ast_data": "FunctionDef name:auto_to_manual_spmd_partition arguments arg:tensor arg:manual_sharding arg:single_dim arg:unspecified_dims Return return:yes" - }, - { - "library": "pandas", - "name": "searchsorted", - "source_code": "def searchsorted(arr: ArrayLike, value: NumpyValueArrayLike | ExtensionArray, side: Literal['left', 'right'] = 'left', sorter: NumpySorter | None = None) -> npt.NDArray[np.intp] | np.intp: if sorter is not None: sorter = ensure_platform_int(sorter) if isinstance(arr, np.ndarray) and arr.dtype.kind in 'iu' and (is_integer(value) or is_integer_dtype(value)): iinfo = np.iinfo(arr.dtype.type) value_arr = np.array([value]) if is_integer(value) else np.array(value) if (value_arr > = iinfo.min).all() and (value_arr < = iinfo.max).all(): dtype = arr.dtype else: dtype = value_arr.dtype if is_integer(value): value = cast(int, dtype.type(value)) else: value = pd_array(cast(ArrayLike, value), dtype = dtype) else: arr = ensure_wrapped_if_datetimelike(arr) return arr.searchsorted(value, side = side, sorter = sorter)", - "docstring": "Find indices where elements should be inserted to maintain order. Find the indices into a sorted array (a) such that, if the corresponding elements in were inserted before the indices, the order of would be preserved. Assuming that is sorted: ====== ================================ returned index satisfies ====== ================================ left `sortersorterarrself`). sorter : 1-D array-like, optional Optional array of integer indices that sort array a into ascending order. They are typically the result of argsort. Returns ------- array of ints or int If value is array-like, array of insertion points. If value is scalar, a single integer. 
See Also -------- numpy.searchsorted : Similar method from NumPy.", - "type": "function", - "file_path": "pandas\\pandas\\core\\algorithms.py", - "ast_data": "FunctionDef name:searchsorted arguments arg:arr type:ArrayLike arg:value type:NumpyValueArrayLike | ExtensionArray arg:side type:Literal['left', 'right'] arg:sorter type:NumpySorter | None If Compare op:IsNot Assign Call call:ensure_platform_int If BoolOp Call call:isinstance Compare op:In BoolOp Call call:is_integer Call call:is_integer_dtype Assign Call call:iinfo Assign If BoolOp Call call:all Call call:all Assign Assign If Call call:is_integer Assign Call call:cast Assign Call call:pd_array Assign Call call:ensure_wrapped_if_datetimelike Return return:yes" - }, - { - "library": "pandas", - "name": "filter", - "source_code": "def filter(self, func, dropna: bool = True, *args, **kwargs): if isinstance(func, str): wrapper = lambda x: getattr(x, func)(*args, **kwargs) else: wrapper = lambda x: func(x, *args, **kwargs) def true_and_notna(x) -> bool: b = wrapper(x) return notna(b) and b try: indices = [self._get_index(name) for name, group in self._grouper.get_iterator(self._obj_with_exclusions) if true_and_notna(group)] except (ValueError, TypeError) as err: raise TypeError('the filter must return a boolean result') from err filtered = self._apply_filter(indices, dropna) return filtered", - "docstring": "Filter elements from groups that don't satisfy a criterion. Elements from groups are filtered if they do not satisfy the boolean criterion specified by func. Parameters ---------- func : function Criterion to apply to each group. Should return True or False. dropna : bool, optional Drop groups that do not pass the filter. True by default; if False, groups that evaluate False are filled with NaNs. *args : tuple Optional positional arguments to pass to . **kwargs : dict Optional keyword arguments to pass to . Returns ------- Series The filtered subset of the original Series. See Also -------- Series.filter: Filter elements of ungrouped Series. DataFrameGroupBy.filter : Filter elements from groups base on criterion. Notes ----- Functions that mutate the passed object can produce unexpected behavior or errors and are not supported. See :ref: for more details. Examples -------- >>> df = pd.DataFrame( ... { ... \"A\": [\"foo\", \"bar\", \"foo\", \"bar\", \"foo\", \"bar\"], ... \"B\": [1, 2, 3, 4, 5, 6], ... \"C\": [2.0, 5.0, 8.0, 1.0, 2.0, 9.0], ... } ... ) >>> grouped = df.groupby(\"A\") >>> df.groupby(\"A\").B.filter(lambda x: x.mean() > 3.0) 1 2 3 4 5 6 Name: B, dtype: int64", - "type": "method", - "file_path": "pandas\\pandas\\core\\groupby\\generic.py", - "ast_data": "FunctionDef name:filter arguments arg:self arg:func arg:dropna type:bool vararg:args kwarg:kwargs If Call call:isinstance Assign Assign FunctionDef name:true_and_notna arguments arg:x Assign Call call:wrapper Return return:yes Try Assign ExceptHandler Raise raises:TypeError('the filter must return a boolean result') Assign Call call:_apply_filter Return return:yes" - }, - { - "library": "pytorch", - "name": "get_node_submodule_map", - "source_code": "def get_node_submodule_map(self) -> dict[str, str]: return self._node_submodule_map", - "docstring": "Returns a map from node name to submodule name, e.g. 
node: main_module_impl_impl_over_arch_unary_multiple_embedding _pooling_embedding_pooling_sparse_entity_equivalence_key _proxy_embedding_bag maps to submodule name of: _run_on_acc_1", - "type": "method", - "file_path": "pytorch\\torch\\fx\\passes\\splitter_base.py", - "ast_data": "FunctionDef name:get_node_submodule_map arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "legfromroots", - "source_code": "def legfromroots(roots): return pu._fromroots(legline, legmul, roots)", - "docstring": "Generate a Legendre series with given roots. The function returns the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), in Legendre form, where the :math: are the roots specified in . If a zero has multiplicity n, then it must appear in n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are , then .. math:: p(x) = c_0 + c_1 * L_1(x) + ... + c_n * L_n(x) The coefficient of the last term is not generally 1 for monic polynomials in Legendre form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of coefficients. If all roots are real then is a real array, if some of the roots are complex, then is complex even if all the coefficients in the result are real (see Examples below). See Also -------- numpy.polynomial.polynomial.polyfromroots numpy.polynomial.chebyshev.chebfromroots numpy.polynomial.laguerre.lagfromroots numpy.polynomial.hermite.hermfromroots numpy.polynomial.hermite_e.hermefromroots Examples -------- >>> import numpy.polynomial.legendre as L >>> L.legfromroots((-1,0,1)) # x^3 - x relative to the standard basis array([ 0. , -0.4, 0. , 0.4]) >>> j = complex(0,1) >>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\legendre.py", - "ast_data": "FunctionDef name:legfromroots arguments arg:roots Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_feature_names_out", - "source_code": "def get_feature_names_out(self, input_features = None): check_is_fitted(self, 'n_features_in_') if self.voting = = 'soft' and (not self.flatten_transform): raise ValueError(\"get_feature_names_out is not supported when `voting = 'soft'` and `flatten_transform = False`\") _check_feature_names_in(self, input_features, generate_names = False) class_name = self.__class__.__name__.lower() active_names = [name for name, est in self.estimators if est ! = 'drop'] if self.voting = = 'hard': return np.asarray([f'{class_name}_{name}' for name in active_names], dtype = object) n_classes = len(self.classes_) names_out = [f'{class_name}_{name}{i}' for name in active_names for i in range(n_classes)] return np.asarray(names_out, dtype = object)", - "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. 
Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_voting.py", - "ast_data": "FunctionDef name:get_feature_names_out arguments arg:self arg:input_features If BoolOp Compare op:Eq Raise raises:ValueError(\"get_feature_names_out is not supported when `voting='soft'` and `flatten_transform=False`\") Assign Call call:lower Assign If Compare op:Eq Return return:yes Assign Call call:len Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "rotate_deg_around", - "source_code": "def rotate_deg_around(self, x, y, degrees): x, y = (float(x), float(y)) return self.translate(-x, -y).rotate_deg(degrees).translate(x, y)", - "docstring": "Add a rotation (in degrees) around the point (x, y) in place. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:rotate_deg_around arguments arg:self arg:x arg:y arg:degrees Assign Return return:yes" - }, - { - "library": "pandas", - "name": "typeof_index", - "source_code": "@typeof_impl.register(Index) def typeof_index(val, c) -> IndexType: arrty = typeof_impl(val._numba_data, c) assert arrty.ndim = = 1 return IndexType(arrty.dtype, arrty.layout, type(val))", - "docstring": "This will assume that only strings are in object dtype index. (you should check this before this gets lowered down to numba)", - "type": "function", - "file_path": "pandas\\pandas\\core\\_numba\\extensions.py", - "ast_data": "FunctionDef name:typeof_index arguments arg:val arg:c Call call:register Assign Call call:typeof_impl Return return:yes" - }, - { - "library": "pytorch", - "name": "lp_pool2d", - "source_code": "def lp_pool2d(input: Tensor, norm_type: Union[int, float], kernel_size: BroadcastingList2[int], stride: Optional[BroadcastingList2[int]] = None, ceil_mode: bool = False) -> Tensor: if has_torch_function_unary(input): return handle_torch_function(lp_pool2d, (input,), input, norm_type, kernel_size, stride = stride, ceil_mode = ceil_mode) kw, kh = _pair(kernel_size) if stride is not None: out = avg_pool2d(input.pow(norm_type), kernel_size, stride, 0, ceil_mode) else: out = avg_pool2d(input.pow(norm_type), kernel_size, padding = 0, ceil_mode = ceil_mode) return (torch.sign(out) * relu(torch.abs(out))).mul(kw * kh).pow(1.0 / norm_type)", - "docstring": "Apply a 2D power-average pooling over an input signal composed of several input planes. If the sum of all inputs to the power of is zero, the gradient is set to zero as well. 
See :class: for details.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:lp_pool2d arguments arg:input type:Tensor arg:norm_type type:Union[int, float] arg:kernel_size type:BroadcastingList2[int] arg:stride type:Optional[BroadcastingList2[int]] arg:ceil_mode type:bool If Call call:has_torch_function_unary Return return:yes Assign Call call:_pair If Compare op:IsNot Assign Call call:avg_pool2d Assign Call call:avg_pool2d Return return:yes" - }, - { - "library": "scipy", - "name": "is_marray", - "source_code": "def is_marray(xp): return 'marray' in xp.__name__", - "docstring": "Returns True if is an MArray namespace; False otherwise.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\_array_api.py", - "ast_data": "FunctionDef name:is_marray arguments arg:xp Return return:yes" - }, - { - "library": "pytorch", - "name": "indirect_indexing", - "source_code": "def indirect_indexing(self, x: T, size: sympy.Expr, check: bool = True, wrap_neg = True) -> sympy.Expr: raise NotImplementedError", - "docstring": "Convert an integral x into a sympy.Expr that can be subsequently used in indexing computation. 'size' represents an upper bound on what valid indexes can be; when 'check' is True, we check that the x is in bounds. NB: This is typically mandatory to implement for any analysis, because you MUST return a valid sympy.Expr of some sort (even if it's a meaningless symbol).", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ops_handler.py", - "ast_data": "FunctionDef name:indirect_indexing arguments arg:self arg:x type:T arg:size type:sympy.Expr arg:check type:bool arg:wrap_neg Raise raises:NotImplementedError" - }, - { - "library": "pytorch", - "name": "fill_ordered", - "source_code": "@staticmethod def fill_ordered(sizes, order): assert OrderedSet(range(len(sizes))) = = OrderedSet(order), (sizes, order) next_stride = sympy.S.One strides = [None] * len(order) for i in order: strides[i] = next_stride next_stride = next_stride * sizes[i] return strides", - "docstring": "Create a stride based on the order the dimensions should be filled in. In this format, channels last would be: [1, 3, 2, 0]", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "FunctionDef name:fill_ordered arguments arg:sizes arg:order Assign Assign For Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "unwrap_tensor_subclass_parameters", - "source_code": "def unwrap_tensor_subclass_parameters(module: torch.nn.Module) -> torch.nn.Module: for name, tensor in itertools.chain(list(module.named_parameters(recurse = False)), list(module.named_buffers(recurse = False))): if is_traceable_wrapper_subclass(tensor): torch.nn.utils.parametrize.register_parametrization(module, name, UnwrapTensorSubclass()) for name, child in module.named_children(): unwrap_tensor_subclass_parameters(child) return module", - "docstring": "Model transformation that replaces all the parameters that are subclasses to plain tensors. This reduces runtime overhead of flattening/unflattening the parameters. This transformation adds parametrization with . The FQNs of the subclass parameters will be changed and state_dict will become incompatible with the original model. E.g. 
Original model state_dict: {\"p1\": torch.testing._internal.TwoTensor} becomes: {\"parametrizations.p2.original0\": torch.Tensor, \"parametrizations.p2.original1\": torch.Tensor}", - "type": "function", - "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\subclass_parametrization.py", - "ast_data": "FunctionDef name:unwrap_tensor_subclass_parameters arguments arg:module type:torch.nn.Module For Call call:chain If Call call:is_traceable_wrapper_subclass For Call call:named_children Return return:yes" - }, - { - "library": "pandas", - "name": "is_multi_index", - "source_code": "@property def is_multi_index(self) -> bool: return isinstance(self.levels, list)", - "docstring": "the levels attribute is 1 or a list in the case of a multi-index", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:is_multi_index arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "autocast", - "source_code": "class autocast(torch.amp.autocast_mode.autocast): @deprecated(\"`torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\", category = FutureWarning) def __init__(self, enabled: bool = True, dtype: torch.dtype = torch.float16, cache_enabled: bool = True): if torch._jit_internal.is_scripting(): self._enabled = enabled self.device = 'cuda' self.fast_dtype = dtype return super().__init__('cuda', enabled = enabled, dtype = dtype, cache_enabled = cache_enabled) def __enter__(self): if torch._jit_internal.is_scripting(): return self return super().__enter__() def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): if torch._jit_internal.is_scripting(): return return super().__exit__(exc_type, exc_val, exc_tb) def __call__(self, func): if torch._jit_internal.is_scripting(): return func return super().__call__(func)", - "docstring": "See :class:. 
`` instead.", - "type": "class", - "file_path": "pytorch\\torch\\cuda\\amp\\autocast_mode.py", - "ast_data": "ClassDef name:autocast FunctionDef name:__init__ arguments arg:self arg:enabled type:bool arg:dtype type:torch.dtype arg:cache_enabled type:bool Call call:deprecated If Call call:is_scripting Assign Assign Assign Return return:no FunctionDef name:__enter__ arguments arg:self If Call call:is_scripting Return return:yes Return return:yes FunctionDef name:__exit__ arguments arg:self arg:exc_type type:Any arg:exc_val type:Any arg:exc_tb type:Any If Call call:is_scripting Return return:no Return return:yes FunctionDef name:__call__ arguments arg:self arg:func If Call call:is_scripting Return return:yes Return return:yes" - }, - { - "library": "salmon", - "name": "to_string", - "source_code": "def to_string(mail, envelope_header = False): msg = to_message(mail).as_string(envelope_header) return msg", - "docstring": "Returns a canonicalized email string you can use to send or store somewhere.", - "type": "function", - "file_path": "salmon\\salmon\\encoding.py", - "ast_data": "FunctionDef name:to_string arguments arg:mail arg:envelope_header Assign Call call:as_string Return return:yes" - }, - { - "library": "pandas", - "name": "nanmean", - "source_code": "@bottleneck_switch() @_datetimelike_compat def nanmean(values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, mask: npt.NDArray[np.bool_] | None = None) -> float: dtype = values.dtype values, mask = _get_values(values, skipna, fill_value = 0, mask = mask) dtype_sum = _get_dtype_max(dtype) dtype_count = np.dtype(np.float64) if dtype.kind in 'mM': dtype_sum = np.dtype(np.float64) elif dtype.kind in 'iu': dtype_sum = np.dtype(np.float64) elif dtype.kind = = 'f': dtype_sum = dtype dtype_count = dtype count = _get_counts(values.shape, mask, axis, dtype = dtype_count) the_sum = values.sum(axis, dtype = dtype_sum) the_sum = _ensure_numeric(the_sum) if axis is not None and getattr(the_sum, 'ndim', False): count = cast(np.ndarray, count) with np.errstate(all = 'ignore'): the_mean = the_sum / count ct_mask = count = = 0 if ct_mask.any(): the_mean[ct_mask] = np.nan else: the_mean = the_sum / count if count > 0 else np.nan return the_mean", - "docstring": "Compute the mean of the element along an axis ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True mask : ndarray[bool], optional nan-mask if known Returns ------- float Unless input is a float array, in which case use the same precision as the input array. Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, 2, np.nan]) >>> nanops.nanmean(s.values) np.float64(1.5)", - "type": "function", - "file_path": "pandas\\pandas\\core\\nanops.py", - "ast_data": "FunctionDef name:nanmean arguments arg:values type:np.ndarray Call call:bottleneck_switch Assign Assign Call call:_get_values Assign Call call:_get_dtype_max Assign Call call:dtype If Compare op:In Assign Call call:dtype If Compare op:In Assign Call call:dtype If Compare op:Eq Assign Assign Assign Call call:_get_counts Assign Call call:sum Assign Call call:_ensure_numeric If BoolOp Compare op:IsNot Call call:getattr Assign Call call:cast With Assign Assign Compare op:Eq If Call call:any Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "all_max", - "source_code": "def all_max(tensors): return _apply_all_reduce('max', tensors)", - "docstring": "Returns a list of tensors with the all-reduce max across . 
The computation is done with an all-reduce operation, so if only some of the returned tensors are evaluated then the computation will hang. Args: tensors: The input tensors across which to reduce; must be assigned to GPU devices. Returns: List of tensors, each with the maximum of the input tensors, where tensor i has the same device as .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\nccl_ops.py", - "ast_data": "FunctionDef name:all_max arguments arg:tensors Return return:yes" - }, - { - "library": "tensorflow", - "name": "keras_tensor_from_tensor", - "source_code": "def keras_tensor_from_tensor(tensor): keras_tensor_cls = None for tensor_type, cls in keras_tensor_classes: if isinstance(tensor, tensor_type): keras_tensor_cls = cls break out = keras_tensor_cls.from_tensor(tensor) if hasattr(tensor, '_keras_mask'): out._keras_mask = keras_tensor_from_tensor(tensor._keras_mask) return out", - "docstring": "Convert a traced (composite)tensor to a representative KerasTensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", - "ast_data": "FunctionDef name:keras_tensor_from_tensor arguments arg:tensor Assign For If Call call:isinstance Assign Assign Call call:from_tensor If Call call:hasattr Assign Call call:keras_tensor_from_tensor Return return:yes" - }, - { - "library": "pytorch", - "name": "strip_local_scope", - "source_code": "def strip_local_scope(s: str) -> str: import re pattern = 'L\\\\[\\\\s*[\\'\\\\\"](.*?)[\\'\\\\\"]\\\\s*\\\\]' return re.sub(pattern, '\\\\1', s)", - "docstring": "Replace occurrences of L[...] with just the inner content. Handles both single and double quotes. This is to generate user friendly recompilation messages.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\guards.py", - "ast_data": "FunctionDef name:strip_local_scope arguments arg:s type:str Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "starting_wall_time", - "source_code": "def starting_wall_time(self): return self._reader.starting_wall_time()", - "docstring": "Wall timestamp for when the debugged TensorFlow program started. 
Returns: Starting wall time as seconds since the epoch, as a .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", - "ast_data": "FunctionDef name:starting_wall_time arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "try_next", - "source_code": "@_csot.apply async def try_next(self) -> Optional[_DocumentType]: if not self._closed and (not self._cursor.alive): await self._resume() try: try: change = await self._cursor._try_next(True) except PyMongoError as exc: if not _resumable(exc): raise await self._resume() change = await self._cursor._try_next(False) except PyMongoError as exc: if not _resumable(exc) and (not exc.timeout): await self.close() raise except BaseException: await self.close() raise if not self._cursor.alive: self._closed = True if change is None: if self._cursor._post_batch_resume_token is not None: self._resume_token = self._cursor._post_batch_resume_token self._start_at_operation_time = None return change try: resume_token = change['_id'] except KeyError: await self.close() raise InvalidOperation('Cannot provide resume functionality when the resume token is missing.') from None if not self._cursor._has_next() and self._cursor._post_batch_resume_token: resume_token = self._cursor._post_batch_resume_token self._uses_start_after = False self._uses_resume_after = True self._resume_token = resume_token self._start_at_operation_time = None if self._decode_custom: return _bson_to_dict(change.raw, self._orig_codec_options) return change", - "docstring": "Advance the cursor without blocking indefinitely. This method returns the next change document without waiting indefinitely for the next change. For example:: async with await db.collection.watch() as stream: while stream.alive: change = await stream.try_next() # Note that the AsyncChangeStream's resume token may be updated # even when no changes are returned. print(\"Current resume token: %r\" % (stream.resume_token,)) if change is not None: print(\"Change document: %r\" % (change,)) continue # We end up here when there are no recent changes. # Sleep for a while before trying again to avoid flooding # the server with getMore requests when no changes are # available. asyncio.sleep(10) If no change document is cached locally then this method runs a single getMore command. If the getMore yields any documents, the next document is returned, otherwise, if the getMore returns no documents (because there have been no changes) then `` when no document is available after running a single getMore or when the cursor is closed. ..
versionadded:: 3.8", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\change_stream.py", - "ast_data": "AsyncFunctionDef name:try_next arguments arg:self If BoolOp Try Try Assign ExceptHandler If Raise Assign ExceptHandler If BoolOp Raise ExceptHandler Raise If Assign If Compare op:Is If Compare op:IsNot Assign Assign Return return:yes Try Assign ExceptHandler Raise raises:InvalidOperation('Cannot provide resume functionality when the resume token is missing.') If BoolOp Assign Assign Assign Assign Assign If Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "add_tool", - "source_code": "def add_tool(self, name, tool, *args, **kwargs): tool_cls = backend_tools._find_tool_class(type(self.canvas), tool) if not tool_cls: raise ValueError('Impossible to find class for %s' % str(tool)) if name in self._tools: _api.warn_external('A \"Tool class\" with the same name already exists, not added') return self._tools[name] tool_obj = tool_cls(self, name, *args, **kwargs) self._tools[name] = tool_obj if tool_obj.default_keymap is not None: self.update_keymap(name, tool_obj.default_keymap) if isinstance(tool_obj, backend_tools.ToolToggleBase): if tool_obj.radio_group is None: self._toggled.setdefault(None, set()) else: self._toggled.setdefault(tool_obj.radio_group, None) if tool_obj.toggled: self._handle_toggle(tool_obj, None, None) tool_obj.set_figure(self.figure) event = ToolEvent('tool_added_event', self, tool_obj) self._callbacks.process(event.name, event) return tool_obj", - "docstring": "Add *tool* to . If successful, adds a new event `` is the *name* of the tool; the event is fired every time the tool is triggered. Parameters ---------- name : str Name of the tool, treated as the ID, has to be unique. tool : type Class of the tool to be added. A subclass will be used instead if one was registered for the current canvas class. *args, **kwargs Passed to the *tool*'s constructor. See Also -------- matplotlib.backend_tools.ToolBase : The base class for tools.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_managers.py", - "ast_data": "FunctionDef name:add_tool arguments arg:self arg:name arg:tool vararg:args kwarg:kwargs Assign Call call:_find_tool_class If Raise raises:ValueError('Impossible to find class for %s' % str(tool)) If Compare op:In Return return:yes Assign Call call:tool_cls Assign If Compare op:IsNot If Call call:isinstance If Compare op:Is If Assign Call call:ToolEvent Return return:yes" - }, - { - "library": "matplotlib", - "name": "contains", - "source_code": "def contains(self, mouseevent, radius = None): if self._different_canvas(mouseevent): return (False, {}) radius = self._process_radius(radius) codes = self.get_path().codes if codes is not None: vertices = self.get_path().vertices idxs, = np.where(codes = = Path.MOVETO) idxs = idxs[1:] subpaths = map(Path, np.split(vertices, idxs), np.split(codes, idxs)) else: subpaths = [self.get_path()] inside = any((subpath.contains_point((mouseevent.x, mouseevent.y), self.get_transform(), radius) for subpath in subpaths)) return (inside, {})", - "docstring": "Test whether the mouse event occurred in the patch. Parameters ---------- mouseevent : Where the user clicked. radius : float, optional Additional margin on the patch in target coordinates of . See for further details. If , the default value depends on the state of the object: - If is a number, the default is that value. This is so that picking works as expected. 
- Otherwise if the edge color has a non-zero alpha, the default is half of the linewidth. This is so that all the colored pixels are \"in\" the patch. - Finally, if the edge has 0 alpha, the default is 0. This is so that patches without a stroked edge do not have points outside of the filled region report as \"in\" due to an invisible edge. Returns ------- (bool, empty dict)", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:contains arguments arg:self arg:mouseevent arg:radius If Call call:_different_canvas Return return:yes Assign Call call:_process_radius Assign If Compare op:IsNot Assign Assign Call call:where Assign Assign Call call:map Assign Assign Call call:any Return return:yes" - }, - { - "library": "django", - "name": "lookup_spawns_duplicates", - "source_code": "def lookup_spawns_duplicates(opts, lookup_path): lookup_fields = lookup_path.split(LOOKUP_SEP) for field_name in lookup_fields: if field_name = = 'pk': field_name = opts.pk.name try: field = opts.get_field(field_name) except FieldDoesNotExist: continue else: if hasattr(field, 'path_infos'): path_info = field.path_infos opts = path_info[-1].to_opts if any((path.m2m for path in path_info)): return True return False", - "docstring": "Return True if the given lookup path spawns duplicates.", - "type": "function", - "file_path": "django\\django\\contrib\\admin\\utils.py", - "ast_data": "FunctionDef name:lookup_spawns_duplicates arguments arg:opts arg:lookup_path Assign Call call:split For If Compare op:Eq Assign Try Assign Call call:get_field ExceptHandler If Call call:hasattr Assign Assign If Call call:any Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "range", - "source_code": "@contextmanager def range(msg, *args, **kwargs): range_push(msg.format(*args, **kwargs)) try: yield finally: range_pop()", - "docstring": "Context manager / decorator that pushes an ITT range at the beginning of its scope, and pops it at the end. If extra arguments are given, they are passed as arguments to msg.format(). Args: msg (str): message to associate with the range", - "type": "function", - "file_path": "pytorch\\torch\\profiler\\itt.py", - "ast_data": "FunctionDef name:range arguments arg:msg vararg:args kwarg:kwargs Try" - }, - { - "library": "tensorflow", - "name": "value_rowids", - "source_code": "def value_rowids(self, name = None): with ops.name_scope(name, 'RaggedValueRowIds', [self]): return self._row_partition.value_rowids()", - "docstring": "Returns the row indices for the in this ragged tensor. corresponds one-to-one with the outermost dimension of , and specifies the row containing each value. In particular, the row consists of the values where . Args: name: A name prefix for the returned tensor (optional). Returns: A 1-D integer with shape . The returned tensor is nonnegative, and is sorted in ascending order. 
#### Example: >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []]) >>> print(rt.values) tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32) >>> print(rt.value_rowids()) # corresponds 1:1 with rt.values tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", - "ast_data": "FunctionDef name:value_rowids arguments arg:self arg:name With Return return:yes" - }, - { - "library": "matplotlib", - "name": "update_position", - "source_code": "def update_position(self, loc): raise NotImplementedError('Derived must override')", - "docstring": "Set the location of tick in data coords with scalar *loc*.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axis.py", - "ast_data": "FunctionDef name:update_position arguments arg:self arg:loc Raise raises:NotImplementedError('Derived must override')" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, base = 1.0, offset = 0.0): self._edge = _Edge_integer(base, 0) self._offset = offset", - "docstring": "Parameters ---------- base : float > 0, default: 1.0 Interval between ticks. offset : float, default: 0.0 Value added to each multiple of *base*. .. versionadded:: 3.8", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:base arg:offset Assign Call call:_Edge_integer Assign" - }, - { - "library": "tensorflow", - "name": "build_nccl_then_recursive_hd", - "source_code": "def build_nccl_then_recursive_hd(input_tensors, red_op, un_op = None): upper_level_f = lambda x: build_recursive_hd_all_reduce(x, red_op, un_op) return _build_nccl_hybrid(input_tensors, red_op, upper_level_f)", - "docstring": "Construct hybrid of NCCL within workers, Recursive-HD across workers.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", - "ast_data": "FunctionDef name:build_nccl_then_recursive_hd arguments arg:input_tensors arg:red_op arg:un_op Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_capstyle", - "source_code": "@_docstring.interpd def set_capstyle(self, cs): self._capstyle = CapStyle(cs)", - "docstring": "Set the for the collection (for all its elements). Parameters ---------- cs : or %(CapStyle)s", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "FunctionDef name:set_capstyle arguments arg:self arg:cs Assign Call call:CapStyle" - }, - { - "library": "scipy", - "name": "Schwefel04", - "source_code": "class Schwefel04(Benchmark): change_dimensionality = True def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([0.0] * self.N, [10.0] * self.N)) self.custom_bounds = ([0.0, 2.0], [0.0, 2.0]) self.global_optimum = [[1.0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return sum((x - 1.0) ** 2.0 + (x[0] - x ** 2.0) ** 2.0)", - "docstring": "Schwefel 4 objective function. This class defines the Schwefel 4 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Schwefel04}}(x) = \\sum_{i=1}^n \\left[(x_i - 1)^2 + (x_1 - x_i^2)^2 \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for:math: for :math: .. [1] Jamil, M. & Yang, X.-S. 
A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", - "ast_data": "ClassDef name:Schwefel04 Assign FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_const_dim_count", - "source_code": "def get_const_dim_count(node_def): const_value = values_from_const(node_def) return const_value.ndim", - "docstring": "Get the number of dimensions for a Const node. Args: node_def: Const NodeDef. Returns: Number of dimensions for the Const node.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\tools\\optimize_for_inference_lib.py", - "ast_data": "FunctionDef name:get_const_dim_count arguments arg:node_def Assign Call call:values_from_const Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_epsilon", - "source_code": "def set_epsilon(value): global _EPSILON _EPSILON = value", - "docstring": "Sets the value of the fuzz factor used in numeric expressions. Args: value: float. New value of epsilon. Example: >>> tf.keras.backend.epsilon() 1e-07 >>> tf.keras.backend.set_epsilon(1e-5) >>> tf.keras.backend.epsilon() 1e-05 >>> tf.keras.backend.set_epsilon(1e-7)", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend_config.py", - "ast_data": "FunctionDef name:set_epsilon arguments arg:value Assign" - }, - { - "library": "matplotlib", - "name": "transformed", - "source_code": "def transformed(self, transform): pts = self.get_points() ll, ul, lr = transform.transform(np.array([pts[0], [pts[0, 0], pts[1, 1]], [pts[1, 0], pts[0, 1]]])) return Bbox([ll, [lr[0], ul[1]]])", - "docstring": "Construct a by statically transforming this one by *transform*.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:transformed arguments arg:self arg:transform Assign Call call:get_points Assign Call call:transform Return return:yes" - }, - { - "library": "tensorflow", - "name": "exact_laplacian_kernel", - "source_code": "def exact_laplacian_kernel(x, y, stddev): x_aligned, y_aligned = _align_matrices(x, y) diff_l1_norm = math_ops.reduce_sum(math_ops.abs(math_ops.subtract(x_aligned, y_aligned)), 2) return math_ops.exp(-diff_l1_norm / stddev)", - "docstring": "Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. It's shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. It's shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. 
Raises: ValueError: if the shapes of x, y are not compatible.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\kernelized_utils.py", - "ast_data": "FunctionDef name:exact_laplacian_kernel arguments arg:x arg:y arg:stddev Assign Call call:_align_matrices Assign Call call:reduce_sum Return return:yes" - }, - { - "library": "pytorch", - "name": "is_registered_op", - "source_code": "def is_registered_op(self, name: str, version: int) -> bool: functions = self.get_function_group(name) if functions is None: return False return functions.get(version) is not None", - "docstring": "Returns whether the given op is registered for the given opset version.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\registration.py", - "ast_data": "FunctionDef name:is_registered_op arguments arg:self arg:name type:str arg:version type:int Assign Call call:get_function_group If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "join_phase", - "source_code": "def join_phase(self, expected_version): active_version, this_rank = self.join_rendezvous(expected_version) state = json.loads(active_version.value) logger.info('Joined rendezvous version %s as rank %s. Full state: %s', state['version'], this_rank, state) if this_rank = = self._num_min_workers - 1 and state['status'] = = 'joinable': logger.info('Rank %s is responsible for join last call.', this_rank) last_call_deadline = time.time() + self._last_call_timeout self.handle_join_last_call(expected_version, last_call_deadline) logger.info('Rank %s finished join last call.', this_rank) logger.info('Waiting for remaining peers.') active_version = self.wait_for_peers(expected_version) state = json.loads(active_version.value) assert state['version'] = = expected_version, 'Logic error: failed to observe version mismatch' return self.confirm_phase(expected_version, this_rank)", - "docstring": "We observed a rendezvous state in 'joinable' state, and attempt to join this particular version, and then wait for all other peers to join.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\etcd_rendezvous.py", - "ast_data": "FunctionDef name:join_phase arguments arg:self arg:expected_version Assign Call call:join_rendezvous Assign Call call:loads If BoolOp Compare op:Eq Compare op:Eq Assign Assign Call call:wait_for_peers Assign Call call:loads Return return:yes" - }, - { - "library": "scrapy", - "name": "handshakeCompleted", - "source_code": "def handshakeCompleted(self) -> None: assert self.transport is not None if self.transport.negotiatedProtocol is not None and self.transport.negotiatedProtocol ! = PROTOCOL_NAME: self._lose_connection_with_error([InvalidNegotiatedProtocol(self.transport.negotiatedProtocol)])", - "docstring": "Close the connection if it's not made via the expected protocol", - "type": "method", - "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py", - "ast_data": "FunctionDef name:handshakeCompleted arguments arg:self If BoolOp Compare op:IsNot Compare op:NotEq" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, memory_profile): self.timeline = memory_profile.timeline self.categories = memory_profile._categories", - "docstring": "The minimum representation of the memory profile timeline includes the memory timeline and categories. 
The timeline consists of [timestamp, action, (TensorKey, version), numbytes] elements, to denote any actions (pre-existing, create, destroy, or increment_version) that occurred to a specific Tensor for a chunk of memory. The categories help map each (TensorKey, version) pair into a category.", - "type": "method", - "file_path": "pytorch\\torch\\profiler\\_memory_profiler.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:memory_profile Assign Assign" - }, - { - "library": "mongo", - "name": "pool_cleared", - "source_code": "def pool_cleared(self, event: PoolClearedEvent) -> None: raise NotImplementedError", - "docstring": "Abstract method to handle a . Emitted when a connection Pool is cleared. :param event: An instance of :class:.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:pool_cleared arguments arg:self arg:event type:PoolClearedEvent Raise raises:NotImplementedError" - }, - { - "library": "kornia", - "name": "MotionBlur", - "source_code": "class MotionBlur(Module): def __init__(self, kernel_size: int, angle: float, direction: float, border_type: str = 'constant', mode: str = 'nearest') -> None: super().__init__() self.kernel_size = kernel_size self.angle = angle self.direction = direction self.border_type = border_type self.mode = mode def __repr__(self) -> str: return f'{self.__class__.__name__} (kernel_size = {self.kernel_size}, angle = {self.angle}, direction = {self.direction}, border_type = {self.border_type})' def forward(self, x: Tensor) -> Tensor: return motion_blur(x, self.kernel_size, self.angle, self.direction, self.border_type)", - "docstring": "Blur 2D images (4D tensor) using the motion filter. Args: kernel_size: motion kernel width and height. It should be odd and positive. angle: angle of the motion blur in degrees (anti-clockwise rotation). direction: forward/backward direction of the motion blur. Lower values towards -1.0 will point the motion blur towards the back (with angle provided via angle), while higher values towards 1.0 will point the motion blur forward. A value of 0.0 leads to a uniformly (but still angled) motion blur. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H, W)` Examples: >>> input = torch.rand(2, 4, 5, 7) >>> motion_blur = MotionBlur(3, 35., 0.5) >>> output = motion_blur(input) # 2x4x5x7", - "type": "class", - "file_path": "kornia\\kornia\\filters\\motion.py", - "ast_data": "ClassDef name:MotionBlur FunctionDef name:__init__ arguments arg:self arg:kernel_size type:int arg:angle type:float arg:direction type:float arg:border_type type:str arg:mode type:str Assign Assign Assign Assign Assign FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:x type:Tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "should_invoke_op_callbacks", - "source_code": "def should_invoke_op_callbacks(): ctx = context.context() return ctx.op_callbacks and (not ctx.invoking_op_callbacks)", - "docstring": "Determine if op callbacks are present and should be invoked. 
Returns: A thread-local result (boolean) indicating whether any op callback(s) exist and should be invoked.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\op_callbacks.py", - "ast_data": "FunctionDef name:should_invoke_op_callbacks arguments Assign Call call:context Return return:yes" - }, - { - "library": "cryptography", - "name": "parameters", - "source_code": "@abc.abstractmethod def parameters(self) -> DSAParameters: pass", - "docstring": "The DSAParameters object associated with this public key.", - "type": "method", - "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\dsa.py", - "ast_data": "FunctionDef name:parameters arguments arg:self" - }, - { - "library": "pytorch", - "name": "script_qconfig", - "source_code": "def script_qconfig(qconfig): return QConfig(activation = torch.jit.script(qconfig.activation())._c, weight = torch.jit.script(qconfig.weight())._c)", - "docstring": "Instantiate the activation and weight observer modules and script them, these observer module instances will be deepcopied during prepare_jit step.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\quantize_jit.py", - "ast_data": "FunctionDef name:script_qconfig arguments arg:qconfig Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_label_mode", - "source_code": "def set_label_mode(self, mode): _api.check_in_list(['all', 'L', '1', 'keep'], mode = mode) if mode = = 'keep': return for i, j in np.ndindex(self._nrows, self._ncols): try: ax = self.axes_row[i][j] except IndexError: continue if isinstance(ax.axis, MethodType): bottom_axis = SimpleAxisArtist(ax.xaxis, 1, ax.spines['bottom']) left_axis = SimpleAxisArtist(ax.yaxis, 1, ax.spines['left']) else: bottom_axis = ax.axis['bottom'] left_axis = ax.axis['left'] display_at_bottom = i = = self._nrows - 1 if mode = = 'L' else i = = self._nrows - 1 and j = = 0 if mode = = '1' else True display_at_left = j = = 0 if mode = = 'L' else i = = self._nrows - 1 and j = = 0 if mode = = '1' else True bottom_axis.toggle(ticklabels = display_at_bottom, label = display_at_bottom) left_axis.toggle(ticklabels = display_at_left, label = display_at_left)", - "docstring": "Define which axes have tick labels. Parameters ---------- mode : {\"L\", \"1\", \"all\", \"keep\"} The label mode: - \"L\": All axes on the left column get vertical tick labels; all axes on the bottom row get horizontal tick labels. - \"1\": Only the bottom left axes is labelled. - \"all\": All axes are labelled. 
- \"keep\": Do not do anything.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_grid.py", - "ast_data": "FunctionDef name:set_label_mode arguments arg:self arg:mode If Compare op:Eq Return return:no For Call call:ndindex Try Assign ExceptHandler If Call call:isinstance Assign Call call:SimpleAxisArtist Assign Call call:SimpleAxisArtist Assign Assign Assign Assign" - }, - { - "library": "sphinx", - "name": "iscoroutinefunction", - "source_code": "def iscoroutinefunction(obj: Any) -> TypeIs[Callable[..., types.CoroutineType[Any, Any, Any]]]: obj = unwrap_all(obj, stop = _is_wrapped_coroutine) return inspect.iscoroutinefunction(obj)", - "docstring": "Check if the object is a :external+python:term: function.", - "type": "function", - "file_path": "sphinx\\sphinx\\util\\inspect.py", - "ast_data": "FunctionDef name:iscoroutinefunction arguments arg:obj type:Any Assign Call call:unwrap_all Return return:yes" - }, - { - "library": "pandas", - "name": "SpecificationError", - "source_code": "class SpecificationError(Exception): pass", - "docstring": "Exception raised by `` on a Dataframe with duplicated functions names without assigning column name. See Also -------- DataFrame.agg : Aggregate using one or more operations over the specified axis. Series.agg : Aggregate using one or more operations over the specified axis. Examples -------- >>> df = pd.DataFrame({\"A\": [1, 1, 1, 2, 2], \"B\": range(5), \"C\": range(5)}) >>> df.groupby(\"A\").B.agg({\"foo\": \"count\"}) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported >>> df.groupby(\"A\").agg({\"B\": {\"foo\": [\"sum\", \"max\"]}}) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported >>> df.groupby(\"A\").agg([\"min\", \"min\"]) # doctest: +SKIP ... # SpecificationError: nested renamer is not supported", - "type": "class", - "file_path": "pandas\\pandas\\errors\\__init__.py", - "ast_data": "ClassDef name:SpecificationError" - }, - { - "library": "pandas", - "name": "generate_table", - "source_code": "def generate_table(self) -> tuple[dict[str, tuple[int, int]], DataFrame]: gso_table = self._gso_table gso_df = self.df columns = list(gso_df.columns) selected = gso_df[self.columns] col_index = [(col, columns.index(col)) for col in self.columns] keys = np.empty(selected.shape, dtype = np.uint64) for o, (idx, row) in enumerate(selected.iterrows()): for j, (col, v) in enumerate(col_index): val = row[col] val = '' if isna(val) else val key = gso_table.get(val, None) if key is None: key = (v + 1, o + 1) gso_table[val] = key keys[o, j] = self._convert_key(key) for i, col in enumerate(self.columns): gso_df[col] = keys[:, i] return (gso_table, gso_df)", - "docstring": "Generates the GSO lookup table for the DataFrame Returns ------- gso_table : dict Ordered dictionary using the string found as keys and their lookup position (v,o) as values gso_df : DataFrame DataFrame where strl columns have been converted to (v,o) values Notes ----- Modifies the DataFrame in-place. The DataFrame returned encodes the (v,o) values as uint64s. The encoding depends on the dta version, and can be expressed as enc = v + o * 2 ** (o_size * 8) so that v is stored in the lower bits and o is in the upper bits. 
o_size is * 117: 4 * 118: 6 * 119: 5", - "type": "method", - "file_path": "pandas\\pandas\\io\\stata.py", - "ast_data": "FunctionDef name:generate_table arguments arg:self Assign Assign Assign Call call:list Assign Assign Assign Call call:empty For Call call:enumerate For Call call:enumerate Assign Assign Assign Call call:get If Compare op:Is Assign Assign Assign Call call:_convert_key For Call call:enumerate Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "shape", - "source_code": "@property def shape(self): nrows = self._row_partition.static_nrows ncols = self._row_partition.static_uniform_row_length value_shape = self._values.shape[1:] return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)", - "docstring": "The statically known shape of this ragged tensor. Returns: A containing the statically known shape of this ragged tensor. Ragged dimensions have a size of . Examples: >>> tf.ragged.constant([[0], [1, 2]]).shape TensorShape([2, None]) >>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape TensorShape([2, None, 2])", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", - "ast_data": "FunctionDef name:shape arguments arg:self Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "on_predict_end", - "source_code": "@doc_controls.for_subclass_implementers def on_predict_end(self, logs = None): pass", - "docstring": "Called at the end of prediction. Subclasses should override for any actions to run. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "FunctionDef name:on_predict_end arguments arg:self arg:logs" - }, - { - "library": "mongo", - "name": "distinct", - "source_code": "def distinct(self, key: str) -> list: options: dict[str, Any] = {} if self._spec: options['query'] = self._spec if self._max_time_ms is not None: options['maxTimeMS'] = self._max_time_ms if self._comment: options['comment'] = self._comment if self._collation is not None: options['collation'] = self._collation return self._collection.distinct(key, session = self._session, **options)", - "docstring": "Get a list of distinct values for among all documents in the result set of this query. Raises :class: if is not an instance of :class:. The :meth: method obeys the :attr: of the :class: instance on which :meth: was called. :param key: name of key for which we want to get the distinct values .. seealso:: :meth:", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\cursor.py", - "ast_data": "FunctionDef name:distinct arguments arg:self arg:key type:str If Assign If Compare op:IsNot Assign If Assign If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "DataConversionWarning", - "source_code": "class DataConversionWarning(UserWarning): pass", - "docstring": "Warning used to notify implicit data conversions happening in the code. This warning occurs when some input data needs to be converted or interpreted in a way that may not match the user's expectations. 
For example, this warning may occur when the user - passes an integer array to a function which expects float input and will convert the input - requests a non-copying operation, but a copy is required to meet the implementation's data-type expectations; - passes an input whose shape can be interpreted ambiguously. .. versionchanged:: 0.18 Moved from sklearn.utils.validation.", - "type": "class", - "file_path": "scikit-learn\\sklearn\\exceptions.py", - "ast_data": "ClassDef name:DataConversionWarning" - }, - { - "library": "pytorch", - "name": "export_stacks", - "source_code": "def export_stacks(self, path: str, metric: str = 'self_cpu_time_total'): assert self.profiler return self.profiler.export_stacks(path, metric)", - "docstring": "Save stack traces to a file Args: path (str): save stacks file to this location; metric (str): metric to use: \"self_cpu_time_total\" or \"self_cuda_time_total\"", - "type": "method", - "file_path": "pytorch\\torch\\profiler\\profiler.py", - "ast_data": "FunctionDef name:export_stacks arguments arg:self arg:path type:str arg:metric type:str Return return:yes" - }, - { - "library": "kornia", - "name": "z", - "source_code": "@property def z(self) -> Tensor: return self._z", - "docstring": "Return the underlying data with shape :math:.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py", - "ast_data": "FunctionDef name:z arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "cpenmsg", - "source_code": "def cpenmsg(solver, iprint, cpen): if abs(iprint) < 2: return elif iprint > 0: fname = '' else: fname = f'{solver.strip()}_output.txt' if abs(iprint) > = 3: message = f'\\nSet CPEN to {cpen}' else: message = f'\\n\\nSet CPEN to {cpen}' if len(fname) > 0: with open(fname, 'a') as f: f.write(message) else: print(message)", - "docstring": "This function prints a message when CPEN is updated.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\message.py", - "ast_data": "FunctionDef name:cpenmsg arguments arg:solver arg:iprint arg:cpen If Compare op:Lt Return return:no If Compare op:Gt Assign Assign If Compare op:GtE Assign Assign If Compare op:Gt With" - }, - { - "library": "scikit-learn", - "name": "score", - "source_code": "def score(self, X, y, sample_weight = None): if X is None: X = np.zeros(shape = (len(y), 1)) return super().score(X, y, sample_weight)", - "docstring": "Return the coefficient of determination R^2 of the prediction. The coefficient R^2 is defined as , where is the residual sum of squares and is the total sum of squares . The best possible score is 1.0 and it can be negative (because the model can be arbitrarily worse). A constant model that always predicts the expected value of y, disregarding the input features, would get a R^2 score of 0.0. Parameters ---------- X : None or array-like of shape (n_samples, n_features) Test samples. Passing None as test samples gives the same result as passing real test samples, since operates independently of the sampled observations. y : array-like of shape (n_samples,) or (n_samples, n_outputs) True values for X. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- score : float R^2 of w.r.t. 
y.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\dummy.py", - "ast_data": "FunctionDef name:score arguments arg:self arg:X arg:y arg:sample_weight If Compare op:Is Assign Call call:zeros Return return:yes" - }, - { - "library": "pytorch", - "name": "set_default_tensor_type", - "source_code": "def set_default_tensor_type(t: _Union[type['torch.Tensor'], str], /) -> None: if isinstance(t, str): t = _import_dotted_name(t) _C._set_default_tensor_type(t)", - "docstring": ".. warning:: This function is deprecated as of PyTorch 2.1, please use :func: and :func: as alternatives. Sets the default `torch.tensor`. Args: t (type or string): the floating point tensor type or its name Example:: >>> # xdoctest: +SKIP(\"Other tests may have changed the default type. Can we reset it?\") >>> torch.tensor([1.2, 3]).dtype # initial default for floating point is torch.float32 torch.float32 >>> torch.set_default_tensor_type(torch.DoubleTensor) >>> torch.tensor([1.2, 3]).dtype # a new floating point tensor torch.float64", - "type": "function", - "file_path": "pytorch\\torch\\__init__.py", - "ast_data": "FunctionDef name:set_default_tensor_type arguments If Call call:isinstance Assign Call call:_import_dotted_name" - }, - { - "library": "mongo", - "name": "max_await_time_ms", - "source_code": "def max_await_time_ms(self, max_await_time_ms: Optional[int]) -> Cursor[_DocumentType]: if not isinstance(max_await_time_ms, int) and max_await_time_ms is not None: raise TypeError(f'max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}') self._check_okay_to_chain() if self._query_flags & CursorType.TAILABLE_AWAIT: self._max_await_time_ms = max_await_time_ms return self", - "docstring": "Specifies a time limit for a getMore operation on a :attr: cursor. For all other types of cursor max_await_time_ms is ignored. Raises :exc: if is not an integer or `~pymongo.errors.InvalidOperationCursormax_await_time_ms` requires server version **>= 3.2** :param max_await_time_ms: the time limit after which the operation is aborted .. versionadded:: 3.2", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\cursor.py", - "ast_data": "FunctionDef name:max_await_time_ms arguments arg:self arg:max_await_time_ms type:Optional[int] If BoolOp Compare op:IsNot Raise raises:TypeError(f'max_await_time_ms must be an integer or None, not {type(max_await_time_ms)}') If Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_width_char", - "source_code": "def get_width_char(self, c, isord = False): if not isord: c = ord(c) return self._metrics[c].width", - "docstring": "Get the width of the character from the character metric WX field.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\_afm.py", - "ast_data": "FunctionDef name:get_width_char arguments arg:self arg:c arg:isord If Assign Call call:ord Return return:yes" - }, - { - "library": "mongo", - "name": "__getstate__", - "source_code": "def __getstate__(self) -> dict[str, Any]: return {'mode': self.__mode, 'tag_sets': self.__tag_sets, 'max_staleness': self.__max_staleness, 'hedge': self.__hedge}", - "docstring": "Return value of object for pickling. 
Needed explicitly because __slots__() defined.", - "type": "method", - "file_path": "mongo\\pymongo\\read_preferences.py", - "ast_data": "FunctionDef name:__getstate__ arguments arg:self Return return:yes" - }, - { - "library": "coconut", - "name": "compile_file", - "source_code": "def compile_file(self, filepath, write = True, package = False, force = False, **kwargs): set_ext = False if write is False: destpath = None elif write is True: destpath = filepath set_ext = True elif os.path.splitext(write)[1]: destpath = write else: destpath = os.path.join(write, os.path.basename(filepath)) set_ext = True if set_ext: base, ext = os.path.splitext(os.path.splitext(destpath)[0]) if not ext: ext = comp_ext destpath = fixpath(base + ext) if filepath = = destpath: raise CoconutException('cannot compile ' + showpath(filepath) + ' to itself', extra = 'incorrect file extension') if destpath is not None: dest_ext = os.path.splitext(destpath)[1] if dest_ext in code_exts: if force: logger.warn('found destination path with ' + dest_ext + ' extension; compiling anyway due to --force') else: raise CoconutException('found destination path with ' + dest_ext + ' extension; aborting compilation', extra = 'pass --force to override') self.compile(filepath, destpath, package, force = force, **kwargs) return destpath", - "docstring": "Compile a file and return the compiled file's path.", - "type": "method", - "file_path": "coconut\\coconut\\command\\command.py", - "ast_data": "FunctionDef name:compile_file arguments arg:self arg:filepath arg:write arg:package arg:force kwarg:kwargs Assign If Compare op:Is Assign If Compare op:Is Assign Assign If Assign Assign Call call:join Assign If Assign Call call:splitext If Assign Assign Call call:fixpath If Compare op:Eq Raise raises:CoconutException('cannot compile ' + showpath(filepath) + ' to itself', extra='incorrect file extension') If Compare op:IsNot Assign If Compare op:In If Raise raises:CoconutException('found destination path with ' + dest_ext + ' extension; aborting compilation', extra='pass --force to override') Return return:yes" - }, - { - "library": "mongo", - "name": "commit_transaction", - "source_code": "async def commit_transaction(self) -> None: self._check_ended() state = self._transaction.state if state is _TxnState.NONE: raise InvalidOperation('No transaction started') elif state in (_TxnState.STARTING, _TxnState.COMMITTED_EMPTY): self._transaction.state = _TxnState.COMMITTED_EMPTY return elif state is _TxnState.ABORTED: raise InvalidOperation('Cannot call commitTransaction after calling abortTransaction') elif state is _TxnState.COMMITTED: self._transaction.state = _TxnState.IN_PROGRESS try: await self._finish_transaction_with_retry('commitTransaction') except ConnectionFailure as exc: exc._remove_error_label('TransientTransactionError') _reraise_with_unknown_commit(exc) except WTimeoutError as exc: _reraise_with_unknown_commit(exc) except OperationFailure as exc: if exc.code not in _UNKNOWN_COMMIT_ERROR_CODES: raise _reraise_with_unknown_commit(exc) finally: self._transaction.state = _TxnState.COMMITTED", - "docstring": "Commit a multi-statement transaction. .. 
versionadded:: 3.7", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\client_session.py", - "ast_data": "AsyncFunctionDef name:commit_transaction arguments arg:self Assign If Compare op:Is Raise raises:InvalidOperation('No transaction started') If Compare op:In Assign Return return:no If Compare op:Is Raise raises:InvalidOperation('Cannot call commitTransaction after calling abortTransaction') If Compare op:Is Assign Try ExceptHandler ExceptHandler ExceptHandler If Compare op:NotIn Raise Assign" - }, - { - "library": "tensorflow", - "name": "serialize_state", - "source_code": "def serialize_state(self, name = None): if self._reader_ref.dtype = = dtypes.resource: return gen_io_ops.reader_serialize_state_v2(self._reader_ref, name = name) else: return gen_io_ops.reader_serialize_state(self._reader_ref, name = name)", - "docstring": "Produce a string tensor that encodes the state of a reader. Not all Readers support being serialized, so this can produce an Unimplemented error. Args: name: A name for the operation (optional). Returns: A string Tensor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py", - "ast_data": "FunctionDef name:serialize_state arguments arg:self arg:name If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "delete_one", - "source_code": "async def delete_one(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, session: Optional[AsyncClientSession] = None, let: Optional[Mapping[str, Any]] = None, comment: Optional[Any] = None) -> DeleteResult: write_concern = self._write_concern_for(session) return DeleteResult(await self._delete_retryable(filter, False, write_concern = write_concern, collation = collation, hint = hint, session = session, let = let, comment = comment), write_concern.acknowledged)", - "docstring": "Delete a single document matching the filter. >>> await db.test.count_documents({'x': 1}) 3 >>> result = await db.test.delete_one({'x': 1}) >>> result.deleted_count 1 >>> await db.test.count_documents({'x': 1}) 2 :param filter: A query that matches the document to delete. :param collation: An instance of :class:. :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth: (e.g. `~pymongo.asynchronous.client_session.AsyncClientSession~pymongo.results.DeleteResultcollation` option. .. versionadded:: 3.0", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\collection.py", - "ast_data": "AsyncFunctionDef name:delete_one arguments arg:self arg:filter type:Mapping[str, Any] arg:collation type:Optional[_CollationIn] arg:hint type:Optional[_IndexKeyHint] arg:session type:Optional[AsyncClientSession] arg:let type:Optional[Mapping[str, Any]] arg:comment type:Optional[Any] Assign Call call:_write_concern_for Return return:yes" - }, - { - "library": "tensorflow", - "name": "last_checkpoints", - "source_code": "@property def last_checkpoints(self): return list((self._CheckpointFilename(p) for p in self._last_checkpoints))", - "docstring": "List of not-yet-deleted checkpoint filenames. You can pass any of the returned values to . 
Returns: A list of checkpoint filenames, sorted from oldest to newest.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\saver.py", - "ast_data": "FunctionDef name:last_checkpoints arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "connection_checked_out", - "source_code": "def connection_checked_out(self, event: ConnectionCheckedOutEvent) -> None: raise NotImplementedError", - "docstring": "Abstract method to handle a :class:. Emitted when the driver successfully checks out a connection. :param event: An instance of :class:.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:connection_checked_out arguments arg:self arg:event type:ConnectionCheckedOutEvent Raise raises:NotImplementedError" - }, - { - "library": "numpy", - "name": "deriv", - "source_code": "def deriv(self, m = 1): return poly1d(polyder(self.coeffs, m = m))", - "docstring": "Return a derivative of this polynomial. Refer to for full documentation. See Also -------- polyder : equivalent function", - "type": "method", - "file_path": "numpy\\numpy\\lib\\_polynomial_impl.py", - "ast_data": "FunctionDef name:deriv arguments arg:self arg:m Return return:yes" - }, - { - "library": "scikit-learn", - "name": "gradient", - "source_code": "def gradient(self, coef, X, y, sample_weight = None, l2_reg_strength = 0.0, n_threads = 1, raw_prediction = None): (n_samples, n_features), n_classes = (X.shape, self.base_loss.n_classes) n_dof = n_features + int(self.fit_intercept) if raw_prediction is None: weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) else: weights, intercept = self.weight_intercept(coef) grad_pointwise = self.base_loss.gradient(y_true = y, raw_prediction = raw_prediction, sample_weight = sample_weight, n_threads = n_threads) sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) grad_pointwise / = sw_sum if not self.base_loss.is_multiclass: grad = np.empty_like(coef, dtype = weights.dtype) grad[: n_features] = X.T @ grad_pointwise + l2_reg_strength * weights if self.fit_intercept: grad[-1] = grad_pointwise.sum() return grad else: grad = np.empty((n_classes, n_dof), dtype = weights.dtype, order = 'F') grad[:, : n_features] = grad_pointwise.T @ X + l2_reg_strength * weights if self.fit_intercept: grad[:, -1] = grad_pointwise.sum(axis = 0) if coef.ndim = = 1: return grad.ravel(order = 'F') else: return grad", - "docstring": "Computes the gradient w.r.t. coef. Parameters ---------- coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) Coefficients of a linear model. If shape (n_classes * n_dof,), the classes of one feature are contiguous, i.e. one reconstructs the 2d-array via coef.reshape((n_classes, -1), order=\"F\"). X : {array-like, sparse matrix} of shape (n_samples, n_features) Training data. y : contiguous array of shape (n_samples,) Observed, true target values. sample_weight : None or contiguous array of shape (n_samples,), default=None Sample weights. l2_reg_strength : float, default=0.0 L2 regularization strength n_threads : int, default=1 Number of OpenMP threads to use. raw_prediction : C-contiguous array of shape (n_samples,) or array of shape (n_samples, n_classes) Raw prediction values (in link space). If provided, these are used. If None, then raw_prediction = X @ coef + intercept is calculated. 
Returns ------- gradient : ndarray of shape coef.shape The gradient of the loss.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\linear_model\\_linear_loss.py", - "ast_data": "FunctionDef name:gradient arguments arg:self arg:coef arg:X arg:y arg:sample_weight arg:l2_reg_strength arg:n_threads arg:raw_prediction Assign Assign If Compare op:Is Assign Call call:weight_intercept_raw Assign Call call:weight_intercept Assign Call call:gradient Assign If Assign Call call:empty_like Assign If Assign Call call:sum Return return:yes Assign Call call:empty Assign If Assign Call call:sum If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "coconut", - "name": "maybe_copy_elem", - "source_code": "def maybe_copy_elem(item, name): item_ref_count = sys.getrefcount(item) if CPYTHON and (not on_new_python) else float('inf') internal_assert(lambda: item_ref_count > = temp_grammar_item_ref_count, 'add_action got item with too low ref count', (item, type(item), item_ref_count)) if item_ref_count < = temp_grammar_item_ref_count: if DEVELOP: logger.record_stat('maybe_copy_' + name, False) return item else: if DEVELOP: logger.record_stat('maybe_copy_' + name, True) return item.copy()", - "docstring": "Copy the given grammar element if it's referenced somewhere else.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:maybe_copy_elem arguments arg:item arg:name Assign If Compare op:LtE If Return return:yes If Return return:yes" - }, - { - "library": "pandas", - "name": "subtype", - "source_code": "@property def subtype(self): return self._subtype", - "docstring": "The dtype of the Interval bounds. See Also -------- IntervalDtype: An ExtensionDtype for Interval data. Examples -------- >>> dtype = pd.IntervalDtype(subtype=\"int64\", closed=\"both\") >>> dtype.subtype dtype('int64')", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", - "ast_data": "FunctionDef name:subtype arguments arg:self Return return:yes" - }, - { - "library": "cryptography", - "name": "tag", - "source_code": "@property @abc.abstractmethod def tag(self) -> bytes: pass", - "docstring": "Returns tag bytes. This is only available after encryption is finalized.", - "type": "method", - "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\ciphers\\base.py", - "ast_data": "FunctionDef name:tag arguments arg:self" - }, - { - "library": "pytorch", - "name": "unpad_sequence", - "source_code": "def unpad_sequence(padded_sequences: Tensor, lengths: Tensor, batch_first: bool = False) -> list[Tensor]: unpadded_sequences = [] if not batch_first: padded_sequences.transpose_(0, 1) max_length = padded_sequences.shape[1] idx = torch.arange(max_length, device = lengths.device) for seq, length in zip(padded_sequences, lengths): mask = idx < length unpacked_seq = seq[mask] unpadded_sequences.append(unpacked_seq) return unpadded_sequences", - "docstring": "Unpad padded Tensor into a list of variable length Tensors. 
`Tensor` objects", - "type": "function", - "file_path": "pytorch\\torch\\nn\\utils\\rnn.py", - "ast_data": "FunctionDef name:unpad_sequence arguments arg:padded_sequences type:Tensor arg:lengths type:Tensor arg:batch_first type:bool Assign If Assign Assign Call call:arange For Call call:zip Assign Compare op:Lt Assign Return return:yes" - }, - { - "library": "pandas", - "name": "get_dataframe_repr_params", - "source_code": "def get_dataframe_repr_params() -> dict[str, Any]: from pandas.io.formats import console if get_option('display.expand_frame_repr'): line_width, _ = console.get_console_size() else: line_width = None return {'max_rows': get_option('display.max_rows'), 'min_rows': get_option('display.min_rows'), 'max_cols': get_option('display.max_columns'), 'max_colwidth': get_option('display.max_colwidth'), 'show_dimensions': get_option('display.show_dimensions'), 'line_width': line_width}", - "docstring": "Get the parameters used to repr(dataFrame) calls using DataFrame.to_string. Supplying these parameters to DataFrame.to_string is equivalent to calling ``. This is useful if you want to adjust the repr output. .. versionadded:: 1.4.0 Example ------- >>> import pandas as pd >>> >>> df = pd.DataFrame([[1, 2], [3, 4]]) >>> repr_params = pd.io.formats.format.get_dataframe_repr_params() >>> repr(df) == df.to_string(**repr_params) True", - "type": "function", - "file_path": "pandas\\pandas\\io\\formats\\format.py", - "ast_data": "FunctionDef name:get_dataframe_repr_params arguments If Call call:get_option Assign Call call:get_console_size Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "reciprocal", - "source_code": "@staticmethod def reciprocal(x): x = ValueRanges.wrap(x) if 0 in x: return ValueRanges.unknown() else: return ValueRanges.decreasing_map(x, lambda y: FloatTrueDiv(1.0, y))", - "docstring": "Needed as it's used in pow, but it won't appear on a SymPy expression", - "type": "method", - "file_path": "pytorch\\torch\\utils\\_sympy\\value_ranges.py", - "ast_data": "FunctionDef name:reciprocal arguments arg:x Assign Call call:wrap If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "get_random_secret_key", - "source_code": "def get_random_secret_key(): chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_ = +)' return get_random_string(50, chars)", - "docstring": "Return a 50 character random string usable as a SECRET_KEY setting value.", - "type": "function", - "file_path": "django\\django\\core\\management\\utils.py", - "ast_data": "FunctionDef name:get_random_secret_key arguments Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "log_prob", - "source_code": "def log_prob(self, value, name = 'log_prob'): return self._call_log_prob(value, name)", - "docstring": "Log probability density/mass function. Args: value: or . name: Python prepended to names of ops created by this function. 
Returns: log_prob: a of shape with values of type .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", - "ast_data": "FunctionDef name:log_prob arguments arg:self arg:value arg:name Return return:yes" - }, - { - "library": "mongo", - "name": "gen_unordered", - "source_code": "def gen_unordered(self) -> Iterator[_Run]: operations = [_Run(_INSERT), _Run(_UPDATE), _Run(_DELETE)] for idx, (op_type, operation) in enumerate(self.ops): operations[op_type].add(idx, operation) for run in operations: if run.ops: yield run", - "docstring": "Generate batches of operations, batched by type of operation, in arbitrary order.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\bulk.py", - "ast_data": "FunctionDef name:gen_unordered arguments arg:self Assign For Call call:enumerate For If" - }, - { - "library": "django", - "name": "delete", - "source_code": "def delete(self, session_key = None): self._session_key = '' self._session_cache = {} self.modified = True", - "docstring": "To delete, clear the session key and the underlying data structure and set the modified flag so that the cookie is set on the client for the current request.", - "type": "method", - "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py", - "ast_data": "FunctionDef name:delete arguments arg:self arg:session_key Assign Assign Assign" - }, - { - "library": "scikit-learn", - "name": "max", - "source_code": "def max(self, y: Array | complex, /, copy: bool | None = None, xp: ModuleType | None = None) -> Array: xp = array_namespace(self._x) if xp is None else xp mxp = meta_namespace(self._x, xp = xp) y = xp.asarray(y) return self._op(_AtOp.MAX, mxp.maximum, mxp.maximum, y, copy = copy, xp = xp)", - "docstring": "Apply `` and return the updated array.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_at.py", - "ast_data": "FunctionDef name:max arguments arg:copy type:bool | None arg:xp type:ModuleType | None Assign Assign Call call:meta_namespace Assign Call call:asarray Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, input_fn, input_workers, input_contexts, strategy): assert isinstance(input_workers, input_lib.InputWorkers) if input_workers.num_workers ! = len(input_contexts): raise ValueError('Number of input workers (%d) is not same as number of input_contexts (%d)' % (input_workers.num_workers, len(input_contexts))) iterators = [] for i, ctx in enumerate(input_contexts): worker = input_workers.worker_devices[i] with ops.device(worker): result = input_fn(ctx) devices = input_workers.compute_devices_for_worker(i) if isinstance(result, data_types.DatasetV2): iterator = _SingleWorkerDatasetIterator(result, worker, devices) elif callable(result): iterator = _SingleWorkerCallableIterator(result, worker, devices) else: raise ValueError('input_fn must return a tf.data.Dataset or a callable.') iterators.append(iterator) super(InputFunctionIterator, self).__init__(input_workers, iterators, strategy, cardinality = cardinality_lib.UNKNOWN, enable_get_next_as_optional = False) self._enable_get_next_as_optional = False", - "docstring": "Make an iterator for input provided via an input function. Currently implements PER_WORKER mode, in which the is called once on each worker. TODO(priyag): Add other replication modes. Args: input_fn: Input function that returns a object. input_workers: an object. 
input_contexts: A list of instances to be passed to call(s) to . Length and order should match worker order in . strategy: a object, used to run all-reduce to handle last partial batch.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\input_lib.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:input_fn arg:input_workers arg:input_contexts arg:strategy If Compare op:NotEq Raise raises:ValueError('Number of input workers (%d) is not same as number of input_contexts (%d)' % (input_workers.num_workers, len(input_contexts))) Assign For Call call:enumerate Assign With Assign Call call:input_fn Assign Call call:compute_devices_for_worker If Call call:isinstance Assign Call call:_SingleWorkerDatasetIterator If Call call:callable Assign Call call:_SingleWorkerCallableIterator Raise raises:ValueError('input_fn must return a tf.data.Dataset or a callable.') Assign" - }, - { - "library": "tensorflow", - "name": "serialize", - "source_code": "@dispatch.add_dispatch_support def serialize(activation): if hasattr(activation, '__name__') and activation.__name__ in _TF_ACTIVATIONS_V2: return _TF_ACTIVATIONS_V2[activation.__name__] return serialize_keras_object(activation)", - "docstring": "Returns the string identifier of an activation function. Args: activation : Function object. Returns: String denoting the name attribute of the input function For example: >>> tf.keras.activations.serialize(tf.keras.activations.tanh) 'tanh' >>> tf.keras.activations.serialize(tf.keras.activations.sigmoid) 'sigmoid' >>> tf.keras.activations.serialize('abcd') Traceback (most recent call last): ... ValueError: ('Cannot serialize', 'abcd') Raises: ValueError: The input function is not a valid one.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py", - "ast_data": "FunctionDef name:serialize arguments arg:activation If BoolOp Call call:hasattr Compare op:In Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "apply_match", - "source_code": "def apply_match(modules: dict[str, nn.ModuleDict], pattern: Union[tuple[Any], Any], node: Node, matched_node_pattern: list[Node]) -> Optional[list[Node]]: if isinstance(pattern, tuple): if len(pattern) = = 1: if _match(modules, node, pattern[0]): return matched_node_pattern + [node] first, *rest = pattern if _match(modules, node, first): if rest is None: return matched_node_pattern + [node] for user in node.users: return apply_match(modules, tuple(rest), user, matched_node_pattern + [node]) elif _match(modules, node, pattern): return [node] return None", - "docstring": "This function will return the matched nodes if the pattern matches the node given If there is no match, it will return None", - "type": "function", - "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\match_utils.py", - "ast_data": "FunctionDef name:apply_match arguments arg:modules type:dict[str, nn.ModuleDict] arg:pattern type:Union[tuple[Any], Any] arg:node type:Node arg:matched_node_pattern type:list[Node] If Call call:isinstance If Compare op:Eq If Call call:_match Return return:yes Assign If Call call:_match If Compare op:Is Return return:yes For Return return:yes If Call call:_match Return return:yes Return return:yes" - }, - { - "library": "seaborn", - "name": "__call__", - "source_code": "def __call__(self, data, var): vals = data[var] if callable(self.estimator): estimate = self.estimator(vals) else: estimate = vals.agg(self.estimator) if self.error_method is None: err_min = 
err_max = np.nan elif len(data) <= 1: err_min = err_max = np.nan elif callable(self.error_method): err_min, err_max = self.error_method(vals) elif self.error_method == 'sd': half_interval = vals.std() * self.error_level err_min, err_max = (estimate - half_interval, estimate + half_interval) elif self.error_method == 'se': half_interval = vals.sem() * self.error_level err_min, err_max = (estimate - half_interval, estimate + half_interval) elif self.error_method == 'pi': err_min, err_max = _percentile_interval(vals, self.error_level) elif self.error_method == 'ci': units = data.get('units', None) boots = bootstrap(vals, units = units, func = self.estimator, **self.boot_kws) err_min, err_max = _percentile_interval(boots, self.error_level) return pd.Series({var: estimate, f'{var}min': err_min, f'{var}max': err_max})", "docstring": "Aggregate over column of with estimate and error interval.", "type": "method", "file_path": "seaborn\\seaborn\\_statistics.py", "ast_data": "FunctionDef name:__call__ arguments arg:self arg:data arg:var Assign If Call call:callable Assign Call call:estimator Assign Call call:agg If Compare op:Is Assign If Compare op:LtE Assign If Call call:callable Assign Call call:error_method If Compare op:Eq Assign Assign If Compare op:Eq Assign Assign If Compare op:Eq Assign Call call:_percentile_interval If Compare op:Eq Assign Call call:get Assign Call call:bootstrap Assign Call call:_percentile_interval Return return:yes" - }, - { - "library": "django", - "name": "actions", - "source_code": "@property def actions(self): return self._actions.items()", - "docstring": "Get all the enabled actions as an iterable of (name, func).", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\sites.py", - "ast_data": "FunctionDef name:actions arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "get_local_bwd_output", - "source_code": "def get_local_bwd_output(self, mb_index): assert self.has_backward, \"can't steal_bwd_input if this stage doesn't have backward\" assert not self.is_first, \"can't get bwd output if this stage is first\" self._check_chunk_id(mb_index) return self.bwd_cache.pop(mb_index)", - "docstring": "Returns the input grad tensors for this stage, which correspond to the stage inputs during forward.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", - "ast_data": "FunctionDef name:get_local_bwd_output arguments arg:self arg:mb_index Return return:yes" - }, - { - "library": "scipy", - "name": "__call__", - "source_code": "def __call__(self, dim = None, seed = None): return special_ortho_group_frozen(dim, seed = seed)", - "docstring": "Create a frozen SO(N) distribution. See for more information.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:dim arg:seed Return return:yes" - }, - { - "library": "pytorch", - "name": "set_float_to_observed_mapping", - "source_code": "def set_float_to_observed_mapping(self, float_class: type, observed_class: type, quant_type: QuantType = QuantType.STATIC) -> PrepareCustomConfig: if quant_type !
= QuantType.STATIC: raise ValueError('set_float_to_observed_mapping is currently only supported for static quantization') if quant_type not in self.float_to_observed_mapping: self.float_to_observed_mapping[quant_type] = {} self.float_to_observed_mapping[quant_type][float_class] = observed_class return self", - "docstring": "Set the mapping from a custom float module class to a custom observed module class. The observed module class must have a `` class method that converts the float module class to the observed module class. This is currently only supported for static quantization.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", - "ast_data": "FunctionDef name:set_float_to_observed_mapping arguments arg:self arg:float_class type:type arg:observed_class type:type arg:quant_type type:QuantType If Compare op:NotEq Raise raises:ValueError('set_float_to_observed_mapping is currently only supported for static quantization') If Compare op:NotIn Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, seq): self.seq = seq self.offset_string = ''", - "docstring": "Set the sequence *seq* of strings that will be used for labels.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:seq Assign Assign" - }, - { - "library": "pytorch", - "name": "GaussianNLLLoss", - "source_code": "class GaussianNLLLoss(_Loss): __constants__ = ['full', 'eps', 'reduction'] full: bool eps: float def __init__(self, *, full: bool = False, eps: float = 1e-06, reduction: str = 'mean') -> None: super().__init__(None, None, reduction) self.full = full self.eps = eps def forward(self, input: Tensor, target: Tensor, var: Union[Tensor, float]) -> Tensor: return F.gaussian_nll_loss(input, target, var, full = self.full, eps = self.eps, reduction = self.reduction)", - "docstring": "Gaussian negative log likelihood loss. The targets are treated as samples from Gaussian distributions with expectations and variances predicted by the neural network. For a `epsfull(N, *)(*)*(N, *)(*)(N, *)(*)reductionreduction(N, *)` is ignored with respect to autograd, and so the gradients are unaffected by it. Reference: Nix, D. A. and Weigend, A. S., \"Estimating the mean and variance of the target probability distribution\", Proceedings of 1994 IEEE International Conference on Neural Networks (ICNN'94), Orlando, FL, USA, 1994, pp. 55-60 vol.1, doi: 10.1109/ICNN.1994.374138.", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\loss.py", - "ast_data": "ClassDef name:GaussianNLLLoss Assign FunctionDef name:__init__ arguments arg:self Assign Assign FunctionDef name:forward arguments arg:self arg:input type:Tensor arg:target type:Tensor arg:var type:Union[Tensor, float] Return return:yes" - }, - { - "library": "sphinx", - "name": "get_terms", - "source_code": "def get_terms(self, fn2index: dict[str, int]) -> tuple[dict[str, list[int] | int], dict[str, list[int] | int]]: rvs: tuple[dict[str, list[int] | int], dict[str, list[int] | int]] = ({}, {}) for rv, mapping in zip(rvs, (self._mapping, self._title_mapping), strict = True): for k, v in mapping.items(): if len(v) = = 1: fn, = v if fn in fn2index: rv[k] = fn2index[fn] else: rv[k] = sorted((fn2index[fn] for fn in v if fn in fn2index)) return rvs", - "docstring": "Return a mapping of document and title terms to sorted document IDs. 
When a term is only found within a single document, then the value for that term will be an integer value. When a term is found within multiple documents, the value will be a list of integers.", - "type": "method", - "file_path": "sphinx\\sphinx\\search\\__init__.py", - "ast_data": "FunctionDef name:get_terms arguments arg:self arg:fn2index type:dict[str, int] For Call call:zip For Call call:items If Compare op:Eq Assign If Compare op:In Assign Assign Call call:sorted Return return:yes" - }, - { - "library": "django", - "name": "__getitem__", - "source_code": "def __getitem__(self, key): try: list_ = super().__getitem__(key) except KeyError: raise MultiValueDictKeyError(key) try: return list_[-1] except IndexError: return []", - "docstring": "Return the last data value for this key, or [] if it's an empty list; raise KeyError if not found.", - "type": "method", - "file_path": "django\\django\\utils\\datastructures.py", - "ast_data": "FunctionDef name:__getitem__ arguments arg:self arg:key Try Assign Call call:__getitem__ ExceptHandler Raise raises:MultiValueDictKeyError(key) Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "django", - "name": "safeseq", - "source_code": "@register.filter(is_safe = True) def safeseq(value): return [mark_safe(obj) for obj in value]", - "docstring": "A \"safe\" filter for sequences. Mark each element in the sequence, individually, as safe, after converting them to strings. Return a list with the results.", - "type": "function", - "file_path": "django\\django\\template\\defaultfilters.py", - "ast_data": "FunctionDef name:safeseq arguments arg:value Call call:filter Return return:yes" - }, - { - "library": "django", - "name": "to_python", - "source_code": "def to_python(self, value): if value in self.empty_values: return None if isinstance(value, datetime.time): return value return super().to_python(value)", - "docstring": "Validate that the input can be converted to a time. Return a Python datetime.time object.", - "type": "method", - "file_path": "django\\django\\forms\\fields.py", - "ast_data": "FunctionDef name:to_python arguments arg:self arg:value If Compare op:In Return return:yes If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "on_changed", - "source_code": "def on_changed(self, func): return self._observers.connect('changed', lambda val: func(val))", - "docstring": "Connect *func* as callback function to changes of the slider value. Parameters ---------- func : callable Function to call when slider is changed. The function must accept a single float as its arguments. 
Returns ------- int Connection id (which can be used to disconnect *func*).", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", - "ast_data": "FunctionDef name:on_changed arguments arg:self arg:func Return return:yes" - }, - { - "library": "pytorch", - "name": "module_load", - "source_code": "def module_load(self, other, assign = False): if has_torch_function_variadic(self, other): return handle_torch_function(Tensor.module_load, (self, other), self, other, assign = assign) if assign: return other.detach() else: return self.copy_(other).detach()", - "docstring": "Defines how to transform `~nn.Module.load_state_dict~torch.__future__.get_swap_module_params_on_conversion~torch.utils.swap_tensors~nn.Module.load_state_dictnn.Module.load_state_dict`", - "type": "method", - "file_path": "pytorch\\torch\\_tensor.py", - "ast_data": "FunctionDef name:module_load arguments arg:self arg:other arg:assign If Call call:has_torch_function_variadic Return return:yes If Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "__getitem__", - "source_code": "def __getitem__(self, index): if isinstance(index, str): try: layer = capi.get_layer_by_name(self.ptr, force_bytes(index)) except GDALException: raise IndexError('Invalid OGR layer name given: %s.' % index) elif isinstance(index, int): if 0 < = index < self.layer_count: layer = capi.get_layer(self._ptr, index) else: raise IndexError('Index out of range when accessing layers in a datasource: %s.' % index) else: raise TypeError('Invalid index type: %s' % type(index)) return Layer(layer, self)", - "docstring": "Allows use of the index [] operator to get a layer at the index.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\datasource.py", - "ast_data": "FunctionDef name:__getitem__ arguments arg:self arg:index If Call call:isinstance Try Assign Call call:get_layer_by_name ExceptHandler Raise raises:IndexError('Invalid OGR layer name given: %s.' % index) If Call call:isinstance If Compare op:LtE op:Lt Assign Call call:get_layer Raise raises:IndexError('Index out of range when accessing layers in a datasource: %s.' % index) Raise raises:TypeError('Invalid index type: %s' % type(index)) Return return:yes" - }, - { - "library": "matplotlib", - "name": "scaled", - "source_code": "def scaled(self, sx, sy = None): if sy is None: sy = sx new_marker = MarkerStyle(self) _transform = new_marker._user_transform or Affine2D() new_marker._user_transform = _transform.scale(sx, sy) return new_marker", - "docstring": "Return new marker scaled by specified scale factors. If *sy* is not given, the same scale is applied in both the *x*- and *y*-directions. Parameters ---------- sx : float *X*-direction scaling factor. sy : float, optional *Y*-direction scaling factor.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\markers.py", - "ast_data": "FunctionDef name:scaled arguments arg:self arg:sx arg:sy If Compare op:Is Assign Assign Call call:MarkerStyle Assign BoolOp Call call:Affine2D Assign Call call:scale Return return:yes" - }, - { - "library": "scikit-learn", - "name": "gibbs", - "source_code": "def gibbs(self, v): check_is_fitted(self) if not hasattr(self, 'random_state_'): self.random_state_ = check_random_state(self.random_state) h_ = self._sample_hiddens(v, self.random_state_) v_ = self._sample_visibles(h_, self.random_state_) return v_", - "docstring": "Perform one Gibbs sampling step. 
Parameters ---------- v : ndarray of shape (n_samples, n_features) Values of the visible layer to start from. Returns ------- v_new : ndarray of shape (n_samples, n_features) Values of the visible layer after one Gibbs step.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\neural_network\\_rbm.py", - "ast_data": "FunctionDef name:gibbs arguments arg:self arg:v If Assign Call call:check_random_state Assign Call call:_sample_hiddens Assign Call call:_sample_visibles Return return:yes" - }, - { - "library": "sphinx", - "name": "smart_capwords", - "source_code": "def smart_capwords(s: str, sep: str | None = None) -> str: words = s.split(sep) for i, word in enumerate(words): if all((x.islower() for x in word)): words[i] = word.capitalize() return (sep or ' ').join(words)", - "docstring": "Like string.capwords() but does not capitalize words that already contain a capital letter.", - "type": "function", - "file_path": "sphinx\\sphinx\\writers\\texinfo.py", - "ast_data": "FunctionDef name:smart_capwords arguments arg:s type:str arg:sep type:str | None Assign Call call:split For Call call:enumerate If Call call:all Assign Call call:capitalize Return return:yes" - }, - { - "library": "matplotlib", - "name": "paint_path", - "source_code": "@classmethod def paint_path(cls, fill, stroke): if stroke: if fill: return cls.fill_stroke else: return cls.stroke elif fill: return cls.fill else: return cls.endpath", - "docstring": "Return the PDF operator to paint a path. Parameters ---------- fill : bool Fill the path with the fill color. stroke : bool Stroke the outline of the path with the line color.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", - "ast_data": "FunctionDef name:paint_path arguments arg:cls arg:fill arg:stroke If If Return return:yes Return return:yes If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "resolve", - "source_code": "def resolve(node, source, context_filepath, context_lineno, context_col_offset): code_reader = io.StringIO(source) comments_map = {} try: for token in tokenize.generate_tokens(code_reader.readline): tok_type, tok_string, loc, _, _ = token srow, _ = loc if tok_type = = tokenize.COMMENT: comments_map[srow] = tok_string.strip()[1:].strip() except tokenize.TokenError: if isinstance(node, gast.Lambda): pass else: raise source_lines = source.split('\\n') visitor = OriginResolver(node, source_lines, comments_map, context_lineno, context_col_offset, context_filepath) visitor.visit(node)", - "docstring": "Adds origin information to an AST, based on the source it was loaded from. This allows us to map the original source code line numbers to generated source code. Note: the AST may be a part of a larger context (e.g. a function is part of a module that may contain other things). However, this function does not assume the source argument contains the entire context, nor that it contains only code corresponding to node itself. However, it assumes that node was parsed from the given source code. For this reason, two extra arguments are required, and they indicate the location of the node in the original context. Args: node: gast.AST, the AST to annotate. source: Text, the source code representing node. 
context_filepath: Text context_lineno: int context_col_offset: int", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py", - "ast_data": "FunctionDef name:resolve arguments arg:node arg:source arg:context_filepath arg:context_lineno arg:context_col_offset Assign Call call:StringIO Assign Try For Call call:generate_tokens Assign Assign If Compare op:Eq Assign Call call:strip ExceptHandler If Call call:isinstance Raise Assign Call call:split Assign Call call:OriginResolver" - }, - { - "library": "scrapy", - "name": "SettingsAttribute", - "source_code": "class SettingsAttribute: def __init__(self, value: Any, priority: int): self.value: Any = value self.priority: int if isinstance(self.value, BaseSettings): self.priority = max(self.value.maxpriority(), priority) else: self.priority = priority def set(self, value: Any, priority: int) -> None: if priority > = self.priority: if isinstance(self.value, BaseSettings): value = BaseSettings(value, priority = priority) self.value = value self.priority = priority def __repr__(self) -> str: return f''", - "docstring": "Class for storing data related to settings attributes. This class is intended for internal usage, you should try Settings class for settings configuration, not this one.", - "type": "class", - "file_path": "scrapy\\scrapy\\settings\\__init__.py", - "ast_data": "ClassDef name:SettingsAttribute FunctionDef name:__init__ arguments arg:self arg:value type:Any arg:priority type:int If Call call:isinstance Assign Call call:max Assign FunctionDef name:set arguments arg:self arg:value type:Any arg:priority type:int If Compare op:GtE If Call call:isinstance Assign Call call:BaseSettings Assign Assign FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "polyadd", - "source_code": "def polyadd(c1, c2): return pu._add(c1, c2)", - "docstring": "Add one polynomial to another. Returns the sum of two polynomials + . The arguments are sequences of coefficients from lowest order term to highest, i.e., [1,2,3] represents the polynomial ``. Parameters ---------- c1, c2 : array_like 1-D arrays of polynomial coefficients ordered from low to high. Returns ------- out : ndarray The coefficient array representing their sum. See Also -------- polysub, polymulx, polymul, polydiv, polypow Examples -------- >>> from numpy.polynomial import polynomial as P >>> c1 = (1, 2, 3) >>> c2 = (3, 2, 1) >>> sum = P.polyadd(c1,c2); sum array([4., 4., 4.]) >>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2) 28.0", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\polynomial.py", - "ast_data": "FunctionDef name:polyadd arguments arg:c1 arg:c2 Return return:yes" - }, - { - "library": "matplotlib", - "name": "xyann", - "source_code": "@property def xyann(self): return self.get_position()", - "docstring": "The text position. 
See also *xytext* in .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:xyann arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, gm: torch.fx.GraphModule, has_user_defined_triton_kernels: bool = False) -> None: self._stream = io.BytesIO() super().__init__(self._stream) self.dispatch_table = copyreg.dispatch_table.copy() self.dispatch_table.update({FakeTensor: functools.partial(self._reduce_fake_tensor), torch.Tensor: functools.partial(self._reduce_tensor), torch.nn.parameter.Parameter: functools.partial(self._reduce_tensor), torch.SymInt: functools.partial(self._reduce_symint), torch.fx.experimental._backward_state.BackwardState: functools.partial(self._reduce_unsupported)}) if has_user_defined_triton_kernels: self.dispatch_table[gm.__class__] = functools.partial(self._reduce_graph_module) self.fast = True", - "docstring": "Create an FX graph pickler. If include_non_inlined=True, then pickling will include the _values_ for all Tensors. (Note that any tensors are constants attached as attributes to the GraphModule). Otherwise, pickling will include only the metadata for these tensors.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codecache.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:gm type:torch.fx.GraphModule arg:has_user_defined_triton_kernels type:bool Assign Call call:BytesIO Assign Call call:copy If Assign Call call:partial Assign" - }, - { - "library": "pytorch", - "name": "conv_flop_count", - "source_code": "def conv_flop_count(x_shape: list[int], w_shape: list[int], out_shape: list[int], transposed: bool = False) -> int: batch_size = x_shape[0] conv_shape = (x_shape if transposed else out_shape)[2:] c_out, c_in, *filter_size = w_shape \"\\n General idea here is that for a regular conv, for each point in the output\\n spatial dimension we convolve the filter with something (hence\\n `prod(conv_shape) * prod(filter_size)` ops). Then, this gets multiplied by\\n 1. batch_size, 2. the cross product of input and weight channels.\\n\\n For the transpose, it's not each point in the *output* spatial dimension but\\n each point in the *input* spatial dimension.\\n \" flop = prod(conv_shape) * prod(filter_size) * batch_size * c_out * c_in * 2 return flop", - "docstring": "Count flops for convolution. Note only multiplication is counted. Computation for bias are ignored. Flops for a transposed convolution are calculated as flops = (x_shape[2:] * prod(w_shape) * batch_size). Args: x_shape (list(int)): The input shape before convolution. w_shape (list(int)): The filter shape. out_shape (list(int)): The output shape after convolution. 
transposed (bool): is the convolution transposed Returns: int: the number of flops", - "type": "function", - "file_path": "pytorch\\torch\\utils\\flop_counter.py", - "ast_data": "FunctionDef name:conv_flop_count arguments arg:x_shape type:list[int] arg:w_shape type:list[int] arg:out_shape type:list[int] arg:transposed type:bool Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "execute_with_callbacks", - "source_code": "def execute_with_callbacks(op_name, num_outputs, inputs, attrs, ctx, name = None): tensors = quick_execute(op_name, num_outputs, inputs, attrs, ctx, name) for callback in ctx.op_callbacks: callback(op_name, tuple(inputs), attrs, tensors, name) return tensors", - "docstring": "Monkey-patch to execute to enable execution callbacks.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\execute.py", - "ast_data": "FunctionDef name:execute_with_callbacks arguments arg:op_name arg:num_outputs arg:inputs arg:attrs arg:ctx arg:name Assign Call call:quick_execute For Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_callback_parameters", - "source_code": "def set_callback_parameters(callback_list, model, do_validation = False, batch_size = None, epochs = None, steps_per_epoch = None, samples = None, verbose = 1, mode = ModeKeys.TRAIN): metric_names = model.metrics_names for cbk in callback_list: if isinstance(cbk, (BaseLogger, ProgbarLogger)): cbk.stateful_metrics = metric_names[1:] callback_metrics = [] if mode ! = ModeKeys.PREDICT: callback_metrics = copy.copy(metric_names) if do_validation: callback_metrics + = ['val_' + n for n in metric_names] callback_params = {'batch_size': batch_size, 'epochs': epochs, 'steps': steps_per_epoch, 'samples': samples, 'verbose': verbose, 'do_validation': do_validation, 'metrics': callback_metrics} callback_list.set_params(callback_params)", - "docstring": "Sets callback parameters. Args: callback_list: CallbackList instance. model: Model being trained. do_validation: Whether or not validation loop will be run. batch_size: Number of samples per batch. epochs: Number of epoch to train. steps_per_epoch: Number of batches to run per training epoch. samples: Number of training samples. verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger. mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT. 
Which loop mode to configure callbacks for.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "FunctionDef name:set_callback_parameters arguments arg:callback_list arg:model arg:do_validation arg:batch_size arg:epochs arg:steps_per_epoch arg:samples arg:verbose arg:mode Assign For If Call call:isinstance Assign Assign If Compare op:NotEq Assign Call call:copy If Assign" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, meshes: List[layout_lib.Mesh], is_async = True, in_flight_nodes_limit = 8): if any((not isinstance(mesh, layout_lib.Mesh) for mesh in meshes)): raise TypeError('Expected a flat list of Mesh objects, got {}'.format(meshes)) global _next_device_number ctx = context.context() with _next_device_number_lock: self.name = '{}/device: CUSTOM: {}'.format(ctx.host_address_space(), _next_device_number) _next_device_number + = 1 device, device_info = _pywrap_dtensor_device.Allocate(self.name, is_async, in_flight_nodes_limit) context.register_custom_device(device, self.name, device_info) self._device_info = device_info self._current_output_layout = None self._current_default_mesh = None self._meshes = set() self._mesh_lock = threading.Lock() for mesh in meshes: self._register_mesh(mesh)", - "docstring": "Create a new DTensorDevice which executes ops on . Args: meshes: A list of objects indicating groups of devices to execute on. These may also be registered lazily. is_async: Indicates whether DTensor operations on this client will return immediately (with \"non-ready\" handles) or block until executed. This is on by default and is exposed as an option for ease of debugging. in_flight_nodes_limit: Indicates the limit of in-flight nodes before enqueueing of async operations to DTensorDevice is blocked. This limit is per mesh. 0 for no limits from DTensor. Default is 8.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:meshes type:List[layout_lib.Mesh] arg:is_async arg:in_flight_nodes_limit If Call call:any Raise raises:TypeError('Expected a flat list of Mesh objects, got {}'.format(meshes)) Assign Call call:context With Assign Call call:format Assign Call call:Allocate Assign Assign Assign Assign Call call:set Assign Call call:Lock For" - }, - { - "library": "authlib", - "name": "authorize_access_token", - "source_code": "def authorize_access_token(self, **kwargs): if request.method = = 'GET': error = request.args.get('error') if error: description = request.args.get('error_description') raise OAuthError(error = error, description = description) params = {'code': request.args.get('code'), 'state': request.args.get('state')} else: params = {'code': request.form.get('code'), 'state': request.form.get('state')} state_data = self.framework.get_state_data(session, params.get('state')) self.framework.clear_state_data(session, params.get('state')) params = self._format_state_params(state_data, params) claims_options = kwargs.pop('claims_options', None) claims_cls = kwargs.pop('claims_cls', None) leeway = kwargs.pop('leeway', 120) token = self.fetch_access_token(**params, **kwargs) self.token = token if 'id_token' in token and 'nonce' in state_data: userinfo = self.parse_id_token(token, nonce = state_data['nonce'], claims_options = claims_options, claims_cls = claims_cls, leeway = leeway) token['userinfo'] = userinfo return token", - "docstring": "Fetch access token in one step. 
:return: A token dict.", - "type": "method", - "file_path": "authlib\\authlib\\integrations\\flask_client\\apps.py", - "ast_data": "FunctionDef name:authorize_access_token arguments arg:self kwarg:kwargs If Compare op:Eq Assign Call call:get If Assign Call call:get Raise raises:OAuthError(error=error, description=description) Assign Assign Assign Call call:get_state_data Assign Call call:_format_state_params Assign Call call:pop Assign Call call:pop Assign Call call:pop Assign Call call:fetch_access_token Assign If BoolOp Compare op:In Compare op:In Assign Call call:parse_id_token Assign Return return:yes" - }, - { - "library": "django", - "name": "resolve_request", - "source_code": "def resolve_request(self, request): if hasattr(request, 'urlconf'): urlconf = request.urlconf set_urlconf(urlconf) resolver = get_resolver(urlconf) else: resolver = get_resolver() resolver_match = resolver.resolve(request.path_info) request.resolver_match = resolver_match return resolver_match", - "docstring": "Retrieve/set the urlconf for the request. Return the view resolved, with its args and kwargs.", - "type": "method", - "file_path": "django\\django\\core\\handlers\\base.py", - "ast_data": "FunctionDef name:resolve_request arguments arg:self arg:request If Call call:hasattr Assign Assign Call call:get_resolver Assign Call call:get_resolver Assign Call call:resolve Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "variable_shape", - "source_code": "@property def variable_shape(self): return tensor_shape.TensorShape(self.shape)", - "docstring": "Returns a representing the shape of sequence input.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\sequence_feature_column.py", - "ast_data": "FunctionDef name:variable_shape arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "construct_change_message", - "source_code": "def construct_change_message(self, request, form, formsets, add = False): return construct_change_message(form, formsets, add)", - "docstring": "Construct a JSON structure describing changes from a changed object.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\options.py", - "ast_data": "FunctionDef name:construct_change_message arguments arg:self arg:request arg:form arg:formsets arg:add Return return:yes" - }, - { - "library": "flexx", - "name": "get_component_instance", - "source_code": "def get_component_instance(self, id): return self._component_instances.get(id, None)", - "docstring": "Get PyComponent or JsComponent instance that is associated with this session and has the corresponding id. 
The returned value can be None if it does not exist, and a returned component can be disposed.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_session.py", - "ast_data": "FunctionDef name:get_component_instance arguments arg:self arg:id Return return:yes" - }, - { - "library": "scrapy", - "name": "scraped", - "source_code": "def scraped(self, item: Any, response: Response | Failure | None, spider: Spider) -> LogFormatterResult: src: Any if response is None: src = f'{global_object_name(spider.__class__)}.start' elif isinstance(response, Failure): src = response.getErrorMessage() else: src = response return {'level': logging.DEBUG, 'msg': SCRAPEDMSG, 'args': {'src': src, 'item': item}}", - "docstring": "Logs a message when an item is scraped by a spider.", - "type": "method", - "file_path": "scrapy\\scrapy\\logformatter.py", - "ast_data": "FunctionDef name:scraped arguments arg:self arg:item type:Any arg:response type:Response | Failure | None arg:spider type:Spider If Compare op:Is Assign If Call call:isinstance Assign Call call:getErrorMessage Assign Return return:yes" - }, - { - "library": "kornia", - "name": "euler_from_quaternion", - "source_code": "def euler_from_quaternion(w: Tensor, x: Tensor, y: Tensor, z: Tensor) -> tuple[Tensor, Tensor, Tensor]: KORNIA_CHECK(w.shape = = x.shape) KORNIA_CHECK(x.shape = = y.shape) KORNIA_CHECK(y.shape = = z.shape) yy = y * y sinr_cosp = 2.0 * (w * x + y * z) cosr_cosp = 1.0 - 2.0 * (x * x + yy) roll = sinr_cosp.atan2(cosr_cosp) sinp = 2.0 * (w * y - z * x) sinp = sinp.clamp(min = -1.0, max = 1.0) pitch = sinp.asin() siny_cosp = 2.0 * (w * z + x * y) cosy_cosp = 1.0 - 2.0 * (yy + z * z) yaw = siny_cosp.atan2(cosy_cosp) return (roll, pitch, yaw)", - "docstring": "Convert a quaternion coefficients to Euler angles. Returned angles are in radians in XYZ convention. Args: w: quaternion :math: coefficient. x: quaternion :math: coefficient. y: quaternion :math: coefficient. z: quaternion :math: coefficient. Return: A tuple with euler angles, , .", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\conversions.py", - "ast_data": "FunctionDef name:euler_from_quaternion arguments arg:w type:Tensor arg:x type:Tensor arg:y type:Tensor arg:z type:Tensor Assign Assign Assign Assign Call call:atan2 Assign Assign Call call:clamp Assign Call call:asin Assign Assign Assign Call call:atan2 Return return:yes" - }, - { - "library": "tensorflow", - "name": "transform_feature", - "source_code": "def transform_feature(self, transformation_cache, state_manager): id_weight_pair = self.categorical_column.get_sparse_tensors(transformation_cache, state_manager) return self._transform_id_weight_pair(id_weight_pair, self.variable_shape[-1])", - "docstring": "Returns dense representing feature. Args: transformation_cache: A object to access features. state_manager: A to create / access resources such as lookup tables. Returns: Transformed feature . 
Raises: ValueError: if input rank is not known at graph building time.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", - "ast_data": "FunctionDef name:transform_feature arguments arg:self arg:transformation_cache arg:state_manager Assign Call call:get_sparse_tensors Return return:yes" - }, - { - "library": "pytorch", - "name": "serialize_model", - "source_code": "def serialize_model(module, inputs, *, config = None, return_shapes = None, use_int16_for_qint16 = False): return _NnapiSerializer(config, use_int16_for_qint16).serialize_model(module, inputs, return_shapes)", - "docstring": "Convert to NNAPI and serialize torchscript module. Parameters: module: Torchscript module to convert inputs: Tensors used to specify input details for NNAPI config (optional): Optional config to attach to module return_shapes (optional): Specify shape of outputs if your module uses runtime flexible shapes to set output buffer size for NNAPI use_int16_for_qint16 (optional): Use Pytorch int16 to represent NNAPI qint16 values", - "type": "function", - "file_path": "pytorch\\torch\\backends\\_nnapi\\serializer.py", - "ast_data": "FunctionDef name:serialize_model arguments arg:module arg:inputs Return return:yes" - }, - { - "library": "tensorflow", - "name": "strategy_supports_loss_scaling", - "source_code": "def strategy_supports_loss_scaling(): if not distribute_lib.has_strategy(): return True strategy = distribute_lib.get_strategy() return isinstance(strategy, (collective_all_reduce_strategy.CollectiveAllReduceStrategy, collective_all_reduce_strategy.CollectiveAllReduceStrategyV1, one_device_strategy.OneDeviceStrategy, one_device_strategy.OneDeviceStrategyV1, mirrored_strategy.MirroredStrategy, mirrored_strategy.MirroredStrategyV1))", - "docstring": "Returns True if the current Strategy supports loss scaling.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\loss_scale_optimizer.py", - "ast_data": "FunctionDef name:strategy_supports_loss_scaling arguments If Return return:yes Assign Call call:get_strategy Return return:yes" - }, - { - "library": "scipy", - "name": "gc_state", - "source_code": "@contextmanager def gc_state(state): orig_state = gc.isenabled() set_gc_state(state) yield set_gc_state(orig_state)", - "docstring": "Context manager to set state of garbage collector to Parameters ---------- state : bool True for gc enabled, False for disabled Examples -------- >>> with gc_state(False): ... assert not gc.isenabled() >>> with gc_state(True): ... assert gc.isenabled()", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\_gcutils.py", - "ast_data": "FunctionDef name:gc_state arguments arg:state Assign Call call:isenabled" - }, - { - "library": "pygame", - "name": "read", - "source_code": "def read(): with open(PATH) as setup_in: return setup_in.read()", - "docstring": "Return the contents of the Windows Common Setup as a string", - "type": "function", - "file_path": "pygame\\buildconfig\\setup_win_common.py", - "ast_data": "FunctionDef name:read arguments With Return return:yes" - }, - { - "library": "numpy", - "name": "assemble_flags", - "source_code": "def assemble_flags(self, in_flags): if in_flags is None: return [] out_flags = [] for in_flag in in_flags: if callable(in_flag): out_flags + = in_flag(self) else: out_flags.append(in_flag) return out_flags", - "docstring": "Assemble flags from flag list Parameters ---------- in_flags : None or sequence None corresponds to empty list. 
Sequence elements can be strings or callables that return lists of strings. Callable takes as single parameter. Returns ------- out_flags : list", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\command\\build_clib.py", - "ast_data": "FunctionDef name:assemble_flags arguments arg:self arg:in_flags If Compare op:Is Return return:yes Assign For If Call call:callable Return return:yes" - }, - { - "library": "scipy", - "name": "max", - "source_code": "def max(self, axis = None, out = None, *, explicit = False): return self._min_or_max(axis, out, np.maximum, explicit)", - "docstring": "Return the maximum of the array/matrix or maximum along an axis. By default, all elements are taken into account, not just the non-zero ones. But with set, only the stored elements are considered. Parameters ---------- axis : {-2, -1, 0, 1, None} optional Axis along which the sum is computed. The default is to compute the maximum over all elements, returning a scalar (i.e., = ). out : None, optional This argument is in the signature *solely* for NumPy compatibility reasons. Do not pass in anything except for the default value, as this argument is not used. explicit : {False, True} optional (default: False) When set to True, only the stored elements will be considered. If a row/column is empty, the sparse.coo_array returned has no stored element (i.e. an implicit zero) for that row/column. .. versionadded:: 1.15.0 Returns ------- amax : coo_array or scalar Maximum of . If is None, the result is a scalar value. If is given, the result is a sparse.coo_array of dimension ``. See Also -------- min : The minimum value of a sparse array/matrix along a given axis. numpy.max : NumPy's implementation of 'max'", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_data.py", - "ast_data": "FunctionDef name:max arguments arg:self arg:axis arg:out Return return:yes" - }, - { - "library": "pytorch", - "name": "ShapeComputeModule", - "source_code": "class ShapeComputeModule(torch.nn.Module): pass", - "docstring": "Code-gen-ed module for tensor shape computation. module.prepare will mutate ser_model according to the computed operand shapes, based on the shapes of args. 
Returns a list of output templates.", - "type": "class", - "file_path": "pytorch\\torch\\backends\\_nnapi\\prepare.py", - "ast_data": "ClassDef name:ShapeComputeModule" - }, - { - "library": "pytorch", - "name": "check_invariants", - "source_code": "def check_invariants(self, inputs: list[InputType]) -> tuple[CheckInvariantStatus, Callable[..., str]]: _logger = functools.partial(log_data_ptr_mismatch, self.wrapped_function.placeholders, inputs, self.static_input_data_ptrs) if not torch._C._tensors_data_ptrs_at_indices_equal(inputs, self.static_input_data_ptrs, self.cudagraph_managed_idxs): status = CheckInvariantStatus.CudagraphManagedIdxMismatch _logger = functools.partial(_logger, self.cudagraph_managed_idxs, status) return (status, _logger) if not self._check_liveness(self.expected_dead_indices_before_graph, self.path_weakrefs): status = CheckInvariantStatus.ExpectedDeadIndicesBeforeGraphMismatch return (status, lambda: f'{status}') if self.rerecord_if_static_inputs_change and (not torch._C._tensors_data_ptrs_at_indices_equal(inputs, self.static_input_data_ptrs, self.static_input_idxs)): status = CheckInvariantStatus.StaticInputIdxMismatch _logger = functools.partial(_logger, self.static_input_idxs, status) return (status, _logger) for idx in self.cudagraph_managed_idxs: if not self.preserved_aliased_inputs[idx]: inputs[idx] = None torch._check(self._check_liveness(self.expected_dead_indices_after_graph, self.path_weakrefs), lambda: 'TODO: graph recording observed an input tensor deallocate during graph recording that did not occur during replay. Please file an issue.') return (CheckInvariantStatus.SUCCESS, lambda: f'{CheckInvariantStatus.SUCCESS}')", - "docstring": "Checks if this node can be run. The same pattern of tensor liveness, static inputs, and tensors managed in the cudagraph private pool must remain stable.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", - "ast_data": "FunctionDef name:check_invariants arguments arg:self arg:inputs type:list[InputType] Assign Call call:partial If Assign Assign Call call:partial Return return:yes If Assign Return return:yes If BoolOp Assign Assign Call call:partial Return return:yes For If Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "classify_jobs", - "source_code": "def classify_jobs(all_job_names: list[str], sha_grid: Any, filtered_jobs_names: set[str]) -> tuple[list[JobStatus], list[Any]]: job_data = map_job_data(all_job_names, sha_grid) job_statuses: list[JobStatus] = [] for job in job_data: job_statuses.append(JobStatus(job, job_data[job])) jobs_to_alert_on = [] flaky_jobs = [] for job_status in job_statuses: if job_status.job_name not in filtered_jobs_names: continue if job_status.should_alert(): jobs_to_alert_on.append(job_status) flaky_jobs.extend(job_status.flaky_jobs) return (jobs_to_alert_on, flaky_jobs)", - "docstring": "Creates Job Statuses which has the logic for if need to alert or if there's flaky jobs. Classifies jobs into jobs to alert on and flaky jobs. 
:param all_job_names: list of all job names as returned by the HUD :param sha_grid: list of all job data as returned by the HUD (parallel index to all_job_names) :param filtered_jobs_names: set of job names to actually consider :return:", "type": "function", "file_path": "pytorch\\tools\\alerts\\create_alerts.py", "ast_data": "FunctionDef name:classify_jobs arguments arg:all_job_names type:list[str] arg:sha_grid type:Any arg:filtered_jobs_names type:set[str] Assign Call call:map_job_data For Assign Assign For If Compare op:NotIn If Call call:should_alert Return return:yes" - }, - { - "library": "scipy", - "name": "obrientransform", - "source_code": "def obrientransform(*args): data = argstoarray(*args).T v = data.var(axis = 0, ddof = 1) m = data.mean(0) n = data.count(0).astype(float) data -= m data **= 2 data *= (n - 1.5) * n data -= 0.5 * v * (n - 1) data /= (n - 1.0) * (n - 2.0) if not ma.allclose(v, data.mean(0)): raise ValueError('Lack of convergence in obrientransform.') return data", "docstring": "Computes a transform on input data (any number of columns). Used to test for homogeneity of variance prior to running one-way stats. Each array in `f_oneway()` run on the transformed data and found significant, variances are unequal. From Maxwell and Delaney, p.112. Returns: transformed data for use in an ANOVA", "type": "function", "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", "ast_data": "FunctionDef name:obrientransform arguments vararg:args Assign Assign Call call:var Assign Call call:mean Assign Call call:astype If Raise raises:ValueError('Lack of convergence in obrientransform.') Return return:yes" - }, - { - "library": "pytorch", - "name": "AutogradStateOpsFailSafeguard", - "source_code": "class AutogradStateOpsFailSafeguard(TorchFunctionMode): def __torch_function__(self, func, types, args = (), kwargs = None): kwargs = kwargs or {} unsupported_grad_mode_ops = [torch._C._set_grad_enabled] current_state = torch._C.is_grad_enabled() if func in unsupported_grad_mode_ops: assert len(args) == 1 changed_state = args[0] mode = torch._C._get_dispatch_mode(torch._C._TorchDispatchModeKey.PROXY) if mode and isinstance(mode, ProxyTorchDispatchMode) and (not mode.pre_dispatch) and (changed_state != current_state): raise RuntimeError(f\"Encountered autograd state manager op {func} trying to change global autograd state while exporting. This is unsafe because we don't capture this op in torch.export today, hence we can't reflect the user intention soundly. You can fix this by adding a torch.no_grad() context around the export call.\") return func(*args, **kwargs)", "docstring": "Detect grad state ops during exporting the graph and fail the process by raising an error, to avoid unexpected behavior. Those grad mode ops could be: Export with predispatch mode is exempted.", "type": "class", "file_path": "pytorch\\torch\\export\\_safeguard.py", "ast_data": "ClassDef name:AutogradStateOpsFailSafeguard FunctionDef name:__torch_function__ arguments arg:self arg:func arg:types arg:args arg:kwargs Assign BoolOp Assign Assign Call call:is_grad_enabled If Compare op:In Assign Assign Call call:_get_dispatch_mode If BoolOp Call call:isinstance Compare op:NotEq Raise raises:RuntimeError(f\"Encountered autograd state manager op {func} trying to change global autograd state while exporting. This is unsafe because we don't capture this op in torch.export today, hence we can't reflect the user intention soundly.
You can fix this by adding a torch.no_grad() context around the export call.\") Return return:yes" - }, - { - "library": "pytorch", - "name": "wrap", - "source_code": "def wrap(module: nn.Module, **wrap_overrides: Any) -> nn.Module: if _ConfigAutoWrap.in_autowrap_context: assert _ConfigAutoWrap.wrapper_cls is not None wrap_overrides = {**_ConfigAutoWrap.kwargs, **wrap_overrides} return _wrap(module, _ConfigAutoWrap.wrapper_cls, **wrap_overrides) return module", - "docstring": "Annotate that a module should be wrapped. Annotated modules will only be wrapped if inside of an :func: context manager. This allows a module to be initialized both with and without a wrapper without code change. The class that this function wraps the passed in `enable_wrapenable_wrap` context", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\fsdp\\wrap.py", - "ast_data": "FunctionDef name:wrap arguments arg:module type:nn.Module kwarg:wrap_overrides If Assign Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "gaussian_gradient_magnitude", - "source_code": "@_ni_docstrings.docfiller def gaussian_gradient_magnitude(input, sigma, output = None, mode = 'reflect', cval = 0.0, *, axes = None, **kwargs): input = np.asarray(input) def derivative(input, axis, output, mode, cval, sigma, **kwargs): order = [0] * input.ndim order[axis] = 1 return gaussian_filter(input, sigma, order, output, mode, cval, **kwargs) return generic_gradient_magnitude(input, derivative, output, mode, cval, extra_arguments = (sigma,), extra_keywords = kwargs, axes = axes)", - "docstring": "Multidimensional gradient magnitude using Gaussian derivatives. Parameters ---------- %(input)s sigma : scalar or sequence of scalars The standard deviations of the Gaussian filter are given for each axis as a sequence, or as a single number, in which case it is equal for all axes. %(output)s %(mode_multiple)s %(cval)s axes : tuple of int or None The axes over which to apply the filter. If or tuples are provided, their length must match the number of axes. Extra keyword arguments will be passed to gaussian_filter(). Returns ------- gaussian_gradient_magnitude : ndarray Filtered array. Has the same shape as . Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> fig = plt.figure() >>> plt.gray() # show the filtered result in grayscale >>> ax1 = fig.add_subplot(121) # left side >>> ax2 = fig.add_subplot(122) # right side >>> ascent = datasets.ascent() >>> result = ndimage.gaussian_gradient_magnitude(ascent, sigma=5) >>> ax1.imshow(ascent) >>> ax2.imshow(result) >>> plt.show()", - "type": "function", - "file_path": "scipy\\scipy\\ndimage\\_filters.py", - "ast_data": "FunctionDef name:gaussian_gradient_magnitude arguments arg:input arg:sigma arg:output arg:mode arg:cval kwarg:kwargs Assign Call call:asarray FunctionDef name:derivative arguments arg:input arg:axis arg:output arg:mode arg:cval arg:sigma kwarg:kwargs Assign Assign Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "lookup_cast", - "source_code": "def lookup_cast(self, lookup_type, internal_type = None): return '%s'", - "docstring": "Return the string to use in a query when performing lookups (\"contains\", \"like\", etc.). 
It should contain a '%s' placeholder for the column being searched against.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:lookup_cast arguments arg:self arg:lookup_type arg:internal_type Return return:yes" - }, - { - "library": "salmon", - "name": "close_when_done", - "source_code": "def close_when_done(self): self.producer_fifo.append(None)", - "docstring": "automatically close this channel once the outgoing queue is empty", - "type": "method", - "file_path": "salmon\\salmon\\_vendor\\asynchat.py", - "ast_data": "FunctionDef name:close_when_done arguments arg:self" - }, - { - "library": "mongo", - "name": "is_mongos", - "source_code": "@property async def is_mongos(self) -> bool: return await self._server_property('server_type') = = SERVER_TYPE.Mongos", - "docstring": "If this client is connected to mongos. If the client is not connected, this will block until a connection is established or raise ServerSelectionTimeoutError if no server is available.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\mongo_client.py", - "ast_data": "AsyncFunctionDef name:is_mongos arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_default_bbox_extra_artists", - "source_code": "def get_default_bbox_extra_artists(self): bbox_artists = [artist for artist in self.get_children() if artist.get_visible() and artist.get_in_layout()] for ax in self.axes: if ax.get_visible(): bbox_artists.extend(ax.get_default_bbox_extra_artists()) return bbox_artists", - "docstring": "Return a list of Artists typically used in .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:get_default_bbox_extra_artists arguments arg:self Assign For If Call call:get_visible Return return:yes" - }, - { - "library": "tensorflow", - "name": "scatter_mul", - "source_code": "def scatter_mul(self, sparse_delta, use_locking = False, name = None): if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta) return gen_state_ops.scatter_mul(self._variable, sparse_delta.indices, sparse_delta.values, use_locking = use_locking, name = name)", - "docstring": "Multiply this variable by . Args: sparse_delta: to multiply this variable by. use_locking: If , use locking during the operation. name: the name of the operation. Returns: A that will hold the new value of this variable after the scattered multiplication has completed. 
Raises: TypeError: if is not an .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", - "ast_data": "FunctionDef name:scatter_mul arguments arg:self arg:sparse_delta arg:use_locking arg:name If Raise raises:TypeError('sparse_delta is not IndexedSlices: %s' % sparse_delta) Return return:yes" - }, - { - "library": "pytorch", - "name": "get_freeable_input_buf", - "source_code": "def get_freeable_input_buf(nodes: list[BaseSchedulerNode], graph_inputs: OrderedSet[str]) -> dict[str, FreeableInputBuffer]: def _dep_size_hint(dep: Dep) -> int: res = 0 try: if not dep.has_unbacked_symbols(): res = dep.numbytes_hint() except KeyError: pass return res dep_name_to_succ_nodes: dict[str, OrderedSet[BaseSchedulerNode]] = collections.defaultdict(OrderedSet) dep_name_to_size: dict[str, int] = dict() for node in nodes: for dep in node.read_writes.reads: if dep.name in graph_inputs and (not dep.name.startswith(('primals_', 'arg', 'fwd_rng_state', 'bwd_rng_state'))): dep_name_to_succ_nodes[dep.name].add(node) dep_name_to_size[dep.name] = _dep_size_hint(dep) name_to_freeable_input_buf: dict[str, FreeableInputBuffer] = dict() for dep_name, succ_nodes in dep_name_to_succ_nodes.items(): name_to_freeable_input_buf[dep_name] = FreeableInputBuffer(dep_name, MemoryPlanningInfoForBuffer(size_free = dep_name_to_size[dep_name], succ_nodes = succ_nodes)) return name_to_freeable_input_buf", - "docstring": "Create and keep track of all input buffers that can be freed during the program Returns: A dictionary containing all freeble input buffers, keyed by their names.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\memory.py", - "ast_data": "FunctionDef name:get_freeable_input_buf arguments arg:nodes type:list[BaseSchedulerNode] arg:graph_inputs type:OrderedSet[str] FunctionDef name:_dep_size_hint arguments arg:dep type:Dep Assign Try If Assign Call call:numbytes_hint ExceptHandler Return return:yes For For If BoolOp Compare op:In Assign Call call:_dep_size_hint For Call call:items Assign Call call:FreeableInputBuffer Return return:yes" - }, - { - "library": "pytorch", - "name": "check", - "source_code": "@deprecated('`torch._prims_common.check` is deprecated and will be removed in the future. Please use `torch._check*` functions instead.', category = FutureWarning) def check(b: bool, s: Callable[[], str], exc_type: type[Exception] = RuntimeError) -> None: torch._check_with(exc_type, b, s)", - "docstring": "Helper function for raising an error_type (default: RuntimeError) if a boolean condition fails. Error message is a callable producing a string (to avoid wasting time string formatting in non-error case, and also to make it easier for torchdynamo to trace.) .. note:: This function is planned for removal in the future. 
Please use functions instead.", - "type": "function", - "file_path": "pytorch\\torch\\_prims_common\\__init__.py", - "ast_data": "FunctionDef name:check arguments arg:b type:bool arg:s type:Callable[[], str] arg:exc_type type:type[Exception] Call call:deprecated" - }, - { - "library": "pytorch", - "name": "NullKernelHandler", - "source_code": "class NullKernelHandler(NullHandler): def __init__(self): super().__init__() self.removed_buffers = OrderedSet[Any]() self.inplaced_to_remove = OrderedSet[Any]() self.index_dtype = 'tl.int64' def get_index_dtype_as_torch_dtype(self): import torch if self.index_dtype = = 'tl.int64': return torch.int64 elif self.index_dtype = = 'tl.int32': return torch.int32 else: raise ValueError(f'Unknown dtype: {self.index_dtype}')", - "docstring": "We need access in DeferredLine class when there is no kernel in the context. This happens when codegening the wrapper. Initialize and explicitly so we don't need call 'getattr' with default value which is error prone to typo in attribute name.", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\virtualized.py", - "ast_data": "ClassDef name:NullKernelHandler FunctionDef name:__init__ arguments arg:self Assign Call Assign Call Assign FunctionDef name:get_index_dtype_as_torch_dtype arguments arg:self If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Raise raises:ValueError(f'Unknown dtype: {self.index_dtype}')" - }, - { - "library": "scikit-learn", - "name": "get_n_splits", - "source_code": "def get_n_splits(self, X = None, y = None, groups = None): return self.n_splits", - "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", - "ast_data": "FunctionDef name:get_n_splits arguments arg:self arg:X arg:y arg:groups Return return:yes" - }, - { - "library": "scrapy", - "name": "initialized", - "source_code": "def initialized(self, response: Response | None = None) -> Any: return self.__dict__.pop('_postinit_reqs')", - "docstring": "This method must be set as the callback of your last initialization request. See self.init_request() docstring for more info.", - "type": "method", - "file_path": "scrapy\\scrapy\\spiders\\init.py", - "ast_data": "FunctionDef name:initialized arguments arg:self arg:response type:Response | None Return return:yes" - }, - { - "library": "matplotlib", - "name": "draw_image", - "source_code": "def draw_image(self, gc, x, y, im, transform = None): raise NotImplementedError", - "docstring": "Draw an RGBA image. Parameters ---------- gc : A graphics context with clipping information. x : float The distance in physical units (i.e., dots or pixels) from the left hand side of the canvas. y : float The distance in physical units (i.e., dots or pixels) from the bottom side of the canvas. im : (N, M, 4) array of An array of RGBA pixels. transform : If and only if the concrete backend is written such that returns `.Affine2DBase~.RendererBase.draw_image`. The translation vector of the transformation is given in physical units (i.e., dots or pixels). 
Note that the transformation does not override *x* and *y*, and has to be applied *before* translatingthe result by *x* and *y* (this can be accomplished by adding *x* and *y* to the translation vector defined by *transform*).", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:draw_image arguments arg:self arg:gc arg:x arg:y arg:im arg:transform Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "__debug_string__", - "source_code": "def __debug_string__(self): lines = [] to_process = [(self, 0)] while to_process: dataset, depth = to_process.pop() lines.append('-' * 2 * depth + repr(dataset)) to_process.extend([(ds, depth + 1) for ds in dataset._inputs()]) return '\\n'.join(lines)", - "docstring": "Returns a string showing the type of the dataset and its inputs. This string is intended only for debugging purposes, and may change without warning.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", - "ast_data": "FunctionDef name:__debug_string__ arguments arg:self Assign Assign While Assign Call call:pop Return return:yes" - }, - { - "library": "sphinx", - "name": "ensure_eol", - "source_code": "def ensure_eol(self) -> None: if self.body and self.body[-1][-1:] ! = '\\n': self.body.append('\\n')", - "docstring": "Ensure the last line in body is terminated by new line.", - "type": "method", - "file_path": "sphinx\\sphinx\\writers\\texinfo.py", - "ast_data": "FunctionDef name:ensure_eol arguments arg:self If BoolOp Compare op:NotEq" - }, - { - "library": "kornia", - "name": "AEPE", - "source_code": "class AEPE(nn.Module): def __init__(self, reduction: str = 'mean') -> None: super().__init__() self.reduction: str = reduction def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch.Tensor: return aepe(input, target, self.reduction)", - "docstring": "Computes the average endpoint error (AEPE) between 2 flow maps. EPE is the endpoint error between two 2D vectors (e.g., optical flow). Given a h x w x 2 optical flow map, the AEPE is: .. math:: \\text{AEPE}=\\frac{1}{hw}\\sum_{i=1, j=1}^{h, w}\\sqrt{(I_{i,j,1}-T_{i,j,1})^{2}+(I_{i,j,2}-T_{i,j,2})^{2}} Args: reduction : Specifies the reduction to apply to the output: `(*, 2)(*, 2)(1)`. Examples: >>> input1 = torch.rand(1, 4, 5, 2) >>> input2 = torch.rand(1, 4, 5, 2) >>> epe = AEPE(reduction=\"mean\") >>> epe = epe(input1, input2)", - "type": "class", - "file_path": "kornia\\kornia\\metrics\\endpoint_error.py", - "ast_data": "ClassDef name:AEPE FunctionDef name:__init__ arguments arg:self arg:reduction type:str FunctionDef name:forward arguments arg:self arg:input type:torch.Tensor arg:target type:torch.Tensor Return return:yes" - }, - { - "library": "scikit-learn", - "name": "devices", - "source_code": "def devices(self) -> list[_Device]: return ['cpu', _DASK_DEVICE]", - "docstring": "The devices supported by Dask. For Dask, this always returns ``. Returns ------- devices : list[Device] The devices supported by Dask. 
See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_device, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes Examples -------- >>> info = xp.__array_namespace_info__() >>> info.devices() ['cpu', DASK_DEVICE]", - "type": "method", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_info.py", - "ast_data": "FunctionDef name:devices arguments arg:self Return return:yes" - }, - { - "library": "salmon", - "name": "render_pep440_post", - "source_code": "def render_pep440_post(pieces: Dict[str, Any]) -> str: if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: rendered + = '.post%d' % pieces['distance'] if pieces['dirty']: rendered + = '.dev0' rendered + = plus_or_dot(pieces) rendered + = 'g%s' % pieces['short'] else: rendered = '0.post%d' % pieces['distance'] if pieces['dirty']: rendered + = '.dev0' rendered + = '+g%s' % pieces['short'] return rendered", - "docstring": "TAG[.postDISTANCE[.dev0]+gHEX] . The \".dev0\" means dirty. Note that .dev0 sorts backwards (a dirty tree will appear \"older\" than the corresponding clean one), but you shouldn't be releasing software with -dirty anyways. Exceptions: 1: no tags. 0.postDISTANCE[.dev0]", - "type": "function", - "file_path": "salmon\\versioneer.py", - "ast_data": "FunctionDef name:render_pep440_post arguments arg:pieces type:Dict[str, Any] If Assign If BoolOp If Assign If Return return:yes" - }, - { - "library": "pytorch", - "name": "config_list", - "source_code": "def config_list(**configs): generated_configs = [] reserved_names = ['attrs', 'attr_names', 'tags'] if any((attr not in configs for attr in reserved_names)): raise ValueError('Missing attrs in configs') _validate(configs) cross_configs = None if 'cross_product_configs' in configs: cross_configs = cross_product_configs(**configs['cross_product_configs']) for inputs in configs['attrs']: tmp_result = [{configs['attr_names'][i]: input_value} for i, input_value in enumerate(inputs)] tmp_result.append({'tags': '_'.join(configs['tags'])}) if cross_configs: generated_configs + = [tmp_result + list(config) for config in cross_configs] else: generated_configs.append(tmp_result) return generated_configs", - "docstring": "Generate configs based on the list of input shapes. This function will take input shapes specified in a list from user. Besides that, all other parameters will be cross producted first and each of the generated list will be merged with the input shapes list. Reserved Args: attr_names(reserved): a list of names for input shapes. attrs(reserved): a list of values for each input shape. corss_product: a dictionary of attributes which will be cross producted with the input shapes. tags(reserved): a tag used to filter inputs. 
Here is an example: attrs = [ [1, 2], [4, 5], ], attr_names = ['M', 'N'], cross_product_configs={ 'device': ['cpu', 'cuda'], }, we will generate [[{'M': 1}, {'N' : 2}, {'device' : 'cpu'}], [{'M': 1}, {'N' : 2}, {'device' : 'cuda'}], [{'M': 4}, {'N' : 5}, {'device' : 'cpu'}], [{'M': 4}, {'N' : 5}, {'device' : 'cuda'}]]", - "type": "function", - "file_path": "pytorch\\benchmarks\\operator_benchmark\\benchmark_utils.py", - "ast_data": "FunctionDef name:config_list arguments kwarg:configs Assign Assign If Call call:any Raise raises:ValueError('Missing attrs in configs') Assign If Compare op:In Assign Call call:cross_product_configs For Assign If Return return:yes" - }, - { - "library": "tensorflow", - "name": "inbound_nodes", - "source_code": "@property @doc_controls.do_not_doc_inheritable def inbound_nodes(self): return self._inbound_nodes", - "docstring": "Deprecated, do NOT use! Only for compatibility with external Keras.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:inbound_nodes arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "modules_to_mkldnn", - "source_code": "def modules_to_mkldnn(nodes: list[fx.Node], modules: dict[str, nn.Module]): old_modules: dict[nn.Module, nn.Module] = {} for node in nodes: if node.op = = 'call_module': assert isinstance(node.target, str) cur_module = modules[node.target] if type(cur_module) in mkldnn_map: new_module = mkldnn_map[type(cur_module)](cur_module, torch.float) assert isinstance(new_module, nn.Module) old_modules[new_module] = copy.deepcopy(cur_module) replace_node_module(node, modules, new_module) return old_modules", - "docstring": "For each node, if it's a module that can be preconverted into MKLDNN, then we do so and create a mapping to allow us to convert from the MKLDNN version of the module to the original.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py", - "ast_data": "FunctionDef name:modules_to_mkldnn arguments arg:nodes type:list[fx.Node] arg:modules type:dict[str, nn.Module] For If Compare op:Eq Assign If Compare op:In Assign Call Assign Call call:deepcopy Return return:yes" - }, - { - "library": "scipy", - "name": "mahalanobis", - "source_code": "def mahalanobis(u, v, VI): u = _validate_vector(u) v = _validate_vector(v) VI = np.atleast_2d(VI) delta = u - v m = np.dot(np.dot(delta, VI), delta) return np.sqrt(m)", - "docstring": "Compute the Mahalanobis distance between two 1-D arrays. The Mahalanobis distance between 1-D arrays and , is defined as .. math:: \\sqrt{ (u-v) V^{-1} (u-v)^T } where `VIuv`. 
Examples -------- >>> from scipy.spatial import distance >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]] >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv) 1.0 >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv) 1.0 >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv) 1.7320508075688772", - "type": "function", - "file_path": "scipy\\scipy\\spatial\\distance.py", - "ast_data": "FunctionDef name:mahalanobis arguments arg:u arg:v arg:VI Assign Call call:_validate_vector Assign Call call:_validate_vector Assign Call call:atleast_2d Assign Assign Call call:dot Return return:yes" - }, - { - "library": "kornia", - "name": "list_models", - "source_code": "@classmethod def list_models(cls) -> None: repo_contents = cls._fetch_repo_contents('models') models = [file['path'] for file in repo_contents] pprint.pp(models)", - "docstring": "List all available ONNX models in the 'models' folder of the Hugging Face repository.", - "type": "method", - "file_path": "kornia\\kornia\\onnx\\utils.py", - "ast_data": "FunctionDef name:list_models arguments arg:cls Assign Call call:_fetch_repo_contents Assign" - }, - { - "library": "algorithms", - "name": "gcd_bit", - "source_code": "def gcd_bit(a, b): tza = trailing_zero(a) tzb = trailing_zero(b) a >> = tza b >> = tzb while b: if a < b: a, b = (b, a) a - = b a >> = trailing_zero(a) return a << min(tza, tzb)", - "docstring": "Similar to gcd but uses bitwise operators and less error handling.", - "type": "function", - "file_path": "algorithms\\algorithms\\maths\\gcd.py", - "ast_data": "FunctionDef name:gcd_bit arguments arg:a arg:b Assign Call call:trailing_zero Assign Call call:trailing_zero While If Compare op:Lt Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "check_isinstance", - "source_code": "def check_isinstance(types, /, **kwargs): none_type = type(None) types = (types,) if isinstance(types, type) else (none_type,) if types is None else tuple((none_type if tp is None else tp for tp in types)) def type_name(tp): return 'None' if tp is none_type else tp.__qualname__ if tp.__module__ = = 'builtins' else f'{tp.__module__}.{tp.__qualname__}' for k, v in kwargs.items(): if not isinstance(v, types): names = [*map(type_name, types)] if 'None' in names: names.remove('None') names.append('None') raise TypeError('{!r} must be an instance of {}, not a {}'.format(k, ', '.join(names[: -1]) + ' or ' + names[-1] if len(names) > 1 else names[0], type_name(type(v))))", - "docstring": "For each *key, value* pair in *kwargs*, check that *value* is an instance of one of *types*; if not, raise an appropriate TypeError. As a special case, a `` entry in *types* is treated as NoneType. 
Examples -------- >>> _api.check_isinstance((SomeClass, None), arg=arg)", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py", - "ast_data": "FunctionDef name:check_isinstance arguments kwarg:kwargs Assign Call call:type Assign FunctionDef name:type_name arguments arg:tp Return return:yes For Call call:items If Assign If Compare op:In Raise raises:TypeError('{!r} must be an instance of {}, not a {}'.format(k, ', '.join(names[:-1]) + ' or ' + names[-1] if len(names) > 1 else names[0], type_name(type(v))))" - }, - { - "library": "django", - "name": "AggregateQuery", - "source_code": "class AggregateQuery(Query): compiler = 'SQLAggregateCompiler' def __init__(self, model, inner_query): self.inner_query = inner_query super().__init__(model)", - "docstring": "Take another query as a parameter to the FROM clause and only select the elements in the provided list.", - "type": "class", - "file_path": "django\\django\\db\\models\\sql\\subqueries.py", - "ast_data": "ClassDef name:AggregateQuery Assign FunctionDef name:__init__ arguments arg:self arg:model arg:inner_query Assign" - }, - { - "library": "tensorflow", - "name": "num_inner_dimensions", - "source_code": "@property def num_inner_dimensions(self): return tensor_shape.dimension_value(self._inner_dim_sizes.shape[0])", - "docstring": "The number of inner dimensions, or if not statically known.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", - "ast_data": "FunctionDef name:num_inner_dimensions arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "generate_from_torch_refs", - "source_code": "@classmethod def generate_from_torch_refs(cls) -> set[ElementwiseTypePromotionRule]: rule_set = set() rule_set.update(cls._parse_torch_refs(_refs)) rule_set.update(cls._parse_torch_refs(_nn_refs)) rule_set.update(cls._parse_torch_refs(_linalg_refs)) rule_set.update(cls._parse_torch_refs(_special_refs)) rule_set.update(cls._parse_torch_refs(_functional_refs)) return rule_set", - "docstring": "Parse type promotion rules from reference ops under torch._C._refs.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\type_promotion.py", - "ast_data": "FunctionDef name:generate_from_torch_refs arguments arg:cls Assign Call call:set Return return:yes" - }, - { - "library": "scipy", - "name": "__call__", - "source_code": "def __call__(self, alpha1, phi0 = None, derphi0 = None, maxiter = 100): if phi0 is None: phi0 = self.phi(0.0) if derphi0 is None: derphi0 = self.derphi(0.0) phi1 = phi0 derphi1 = derphi0 task = b'START' for i in range(maxiter): stp, phi1, derphi1, task = self._iterate(alpha1, phi1, derphi1, task) if not np.isfinite(stp): task = b'WARN' stp = None break if task[: 2] = = b'FG': alpha1 = stp phi1 = self.phi(stp) derphi1 = self.derphi(stp) else: break else: stp = None task = b'WARNING: dcsrch did not converge within max iterations' if task[: 5] = = b'ERROR' or task[: 4] = = b'WARN': stp = None return (stp, phi1, phi0, task)", - "docstring": "Parameters ---------- alpha1 : float alpha1 is the current estimate of a satisfactory step. A positive initial estimate must be provided. phi0 : float the value of at 0 (if known). derphi0 : float the derivative of at 0 (if known). maxiter : int Returns ------- alpha : float Step size, or None if no suitable step was found. phi : float Value of at the new point . phi0 : float Value of at . task : bytes On exit task indicates status information. 
If task[:4] == b'CONV' then the search is successful. If task[:4] == b'WARN' then the subroutine is not able to satisfy the convergence conditions. The exit value of stp contains the best point found during the search. If task[:5] == b'ERROR' then there is an error in the input arguments.", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_dcsrch.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:alpha1 arg:phi0 arg:derphi0 arg:maxiter If Compare op:Is Assign Call call:phi If Compare op:Is Assign Call call:derphi Assign Assign Assign For Call call:range Assign Call call:_iterate If Assign Assign If Compare op:Eq Assign Assign Call call:phi Assign Call call:derphi Assign Assign If BoolOp Compare op:Eq Compare op:Eq Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "connect", - "source_code": "@staticmethod def connect(tpu = None, zone = None, project = None): resolver = TPUClusterResolver(tpu, zone, project) remote.connect_to_cluster(resolver) tpu_strategy_util.initialize_tpu_system_impl(resolver, TPUClusterResolver) return resolver", - "docstring": "Initializes TPU and returns a TPUClusterResolver. This API will connect to remote TPU cluster and initialize the TPU hardwares. Example usage: >>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver.connect( ... tpu='') It can be viewed as convenient wrapper of the following code: >>> resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu='') >>> tf.config.experimental_connect_to_cluster(resolver) >>> tf.tpu.experimental.initialize_tpu_system(resolver) Args: tpu: A string corresponding to the TPU to use. It can be the TPU name or TPU worker gRPC address. If not set, it will try automatically resolve the TPU address on Cloud TPUs. zone: Zone where the TPUs are located. If omitted or empty, we will assume that the zone of the TPU is the same as the zone of the GCE VM, which we will try to discover from the GCE metadata service. project: Name of the GCP project containing Cloud TPUs. If omitted or empty, we will try to discover the project name of the GCE VM from the GCE metadata service. Returns: An instance of TPUClusterResolver object. Raises: NotFoundError: If no TPU devices found in eager mode.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", - "ast_data": "FunctionDef name:connect arguments arg:tpu arg:zone arg:project Assign Call call:TPUClusterResolver Return return:yes" - }, - { - "library": "mongo", - "name": "QueryType", - "source_code": "class QueryType(str, enum.Enum): EQUALITY = 'equality' 'Used to encrypt a value for an equality query.' RANGE = 'range' 'Used to encrypt a value for a range query.\\n\\n .. versionadded: : 4.9\\n ' RANGEPREVIEW = 'RangePreview' '**DEPRECATED** - Used to encrypt a value for a rangePreview query.\\n\\n .. note: : Support for RangePreview is deprecated. Use: attr: `QueryType.RANGE` instead.\\n\\n .. versionadded: : 4.4\\n '", - "docstring": "An enum that defines the supported values for explicit encryption query_type. .. 
versionadded:: 4.2", - "type": "class", - "file_path": "mongo\\pymongo\\synchronous\\encryption.py", - "ast_data": "ClassDef name:QueryType Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "activity_regularizer", - "source_code": "@activity_regularizer.setter def activity_regularizer(self, regularizer): self._activity_regularizer = regularizer", - "docstring": "Optional regularizer function for the output of this layer.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:activity_regularizer arguments arg:self arg:regularizer Assign" - }, - { - "library": "django", - "name": "get_preferred_type", - "source_code": "def get_preferred_type(self, media_types): if not media_types or not self.accepted_types: return None desired_types = [(accepted_type, media_type) for media_type in media_types if (accepted_type: = self.accepted_type(media_type)) is not None] if not desired_types: return None return min(desired_types, key = lambda t: self.accepted_types.index(t[0]))[1]", - "docstring": "Select the preferred media type from the provided options.", - "type": "method", - "file_path": "django\\django\\http\\request.py", - "ast_data": "FunctionDef name:get_preferred_type arguments arg:self arg:media_types If BoolOp Return return:yes Assign If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_setter", - "source_code": "@tf_export('__internal__.saved_model.load.get_setter', v1 = []) def get_setter(proto): _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None)) if type_registrations is not None: for type_registration in type_registrations: if type_registration.should_load(proto): return type_registration.setter return None", - "docstring": "Gets the registered setter function for the SavedUserObject proto. See VersionedTypeRegistration for info about the setter function. Args: proto: SavedUserObject proto Returns: setter function", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py", - "ast_data": "FunctionDef name:get_setter arguments arg:proto Call call:tf_export Assign Call call:get If Compare op:IsNot For If Call call:should_load Return return:yes Return return:yes" - }, - { - "library": "sphinx", - "name": "cell", - "source_code": "def cell(self, row: int | None = None, col: int | None = None) -> TableCell | None: try: if row is None: row = self.row if col is None: col = self.col return TableCell(self, row, col) except IndexError: return None", - "docstring": "Returns a cell object (i.e. rectangular area) containing given position. 
If no option arguments: `` are used to get a cell object by default.", - "type": "method", - "file_path": "sphinx\\sphinx\\writers\\latex.py", - "ast_data": "FunctionDef name:cell arguments arg:self arg:row type:int | None arg:col type:int | None Try If Compare op:Is Assign If Compare op:Is Assign Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "scrapy", - "name": "install_shutdown_handlers", - "source_code": "def install_shutdown_handlers(function: SignalHandlerT, override_sigint: bool = True) -> None: signal.signal(signal.SIGTERM, function) if signal.getsignal(signal.SIGINT) = = signal.default_int_handler or override_sigint: signal.signal(signal.SIGINT, function) if hasattr(signal, 'SIGBREAK'): signal.signal(signal.SIGBREAK, function)", - "docstring": "Install the given function as a signal handler for all common shutdown signals (such as SIGINT, SIGTERM, etc). If `` the SIGINT handler won't be installed if there is already a handler in place (e.g. Pdb)", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\ossignal.py", - "ast_data": "FunctionDef name:install_shutdown_handlers arguments arg:function type:SignalHandlerT arg:override_sigint type:bool If BoolOp Compare op:Eq If Call call:hasattr" - }, - { - "library": "tensorflow", - "name": "get_unsharded_shape", - "source_code": "def get_unsharded_shape(self, shapes): self._fill_default_values() if len(shapes) ! = self.number_of_shards: raise ValueError(f'Shapes {shapes} is length {len(shapes)} but must be a list of length number_of_shards = {self.number_of_shards}') unsharded_shapes = [self._unshard_shape(s) for s in shapes] for i in range(self.number_of_shards - 1): if not unsharded_shapes[i].is_compatible_with(unsharded_shapes[self.number_of_shards - 1]): raise ValueError(f'Sharded shapes {shapes} are not consistent shards of a full shape sharded {self.number_of_shards} ways along dimension {self.shard_dimension}.') return unsharded_shapes[0]", - "docstring": "Returns the shape of an unsharded Tensor given a list of shards. When given a list of shapes of shards, returns the shape of the unsharded Tensor that would generate the shards. Sets defaults for the policy if number_of_shards or shard_dimension is None. Args: shapes: The shapes of the Tensor shards to be combined. Returns: The shape of the unsharded version of the Tensor. Raises: ValueError: if shapes is not a list of length self.number_of_shards; or any element of shapes is not a valid shape consistent with the sharding policy; or the list of shapes is not a valid sharding of a full shape. TypeError: if an element of shapes is not convertible to a TensorShape", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py", - "ast_data": "FunctionDef name:get_unsharded_shape arguments arg:self arg:shapes If Compare op:NotEq Raise raises:ValueError(f'Shapes {shapes} is length {len(shapes)} but must be a list of length number_of_shards={self.number_of_shards}') Assign For Call call:range If Raise raises:ValueError(f'Sharded shapes {shapes} are not consistent shards of a full shape sharded {self.number_of_shards} ways along dimension {self.shard_dimension}.') Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_params", - "source_code": "def get_params(self, deep = True): return self._get_params('_transformers', deep = deep)", - "docstring": "Get parameters for this estimator. Returns the parameters given in the constructor as well as the estimators contained within the of the . 
Parameters ---------- deep : bool, default=True If True, will return the parameters for this estimator and contained subobjects that are estimators. Returns ------- params : dict Parameter names mapped to their values.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py", - "ast_data": "FunctionDef name:get_params arguments arg:self arg:deep Return return:yes" - }, - { - "library": "tensorflow", - "name": "split_sequence_columns_v2", - "source_code": "def split_sequence_columns_v2(feature_columns): sequence_columns = [] non_sequence_columns = [] for column in feature_columns: if not isinstance(column, (_TPUEmbeddingColumnV2, _TPUSharedEmbeddingColumnV2)): raise TypeError(f'column must be a _TPUEmbeddingColumnV2 or _TPUSharedEmbeddingColumnV2 but got {type(column)} instead.') if column.is_sequence_column(): sequence_columns.append(column) else: non_sequence_columns.append(column) return (sequence_columns, non_sequence_columns)", - "docstring": "Split a list of _TPUEmbeddingColumn into sequence and non-sequence columns. For use in a TPUEstimator model_fn function. E.g. def model_fn(features): sequence_columns, feature_columns = ( tf.tpu.feature_column.split_sequence_columns(feature_columns)) input = tf.feature_column.input_layer( features=features, feature_columns=feature_columns) sequence_features, sequence_lengths = ( tf.contrib.feature_column.sequence_input_layer( features=features, feature_columns=sequence_columns)) Args: feature_columns: A list of _TPUEmbeddingColumns to split. Returns: Two lists of _TPUEmbeddingColumns, the first is the sequence columns and the second is the non-sequence columns.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\feature_column_v2.py", - "ast_data": "FunctionDef name:split_sequence_columns_v2 arguments arg:feature_columns Assign Assign For If Raise raises:TypeError(f'column must be a _TPUEmbeddingColumnV2 or _TPUSharedEmbeddingColumnV2 but got {type(column)} instead.') If Call call:is_sequence_column Return return:yes" - }, - { - "library": "tensorflow", - "name": "sparse_reduce_sum_sparse", - "source_code": "@tf_export(v1 = ['sparse.reduce_sum_sparse', 'sparse_reduce_sum_sparse']) @deprecation.deprecated_endpoints('sparse_reduce_sum_sparse') @deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims') def sparse_reduce_sum_sparse(sp_input, axis = None, keepdims = None, reduction_axes = None, keep_dims = None): keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims) axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_axes', reduction_axes) if keepdims is None: keepdims = False output_ind, output_val, output_shape = gen_sparse_ops.sparse_reduce_sum_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)", - "docstring": "Computes the sum of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to . In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each entry in . If is true, the reduced dimensions are retained with length 1. 
If has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for . Returns: The reduced SparseTensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", - "ast_data": "FunctionDef name:sparse_reduce_sum_sparse arguments arg:sp_input arg:axis arg:keepdims arg:reduction_axes arg:keep_dims Call call:tf_export Call call:deprecated_endpoints Call call:deprecated_args Assign Call call:deprecated_argument_lookup Assign Call call:deprecated_argument_lookup If Compare op:Is Assign Assign Call call:sparse_reduce_sum_sparse Return return:yes" - }, - { - "library": "django", - "name": "add_dependency", - "source_code": "def add_dependency(self, migration, child, parent, skip_validation = False): if child not in self.nodes: error_message = 'Migration %s dependencies reference nonexistent child node %r' % (migration, child) self.add_dummy_node(child, migration, error_message) if parent not in self.nodes: error_message = 'Migration %s dependencies reference nonexistent parent node %r' % (migration, parent) self.add_dummy_node(parent, migration, error_message) self.node_map[child].add_parent(self.node_map[parent]) self.node_map[parent].add_child(self.node_map[child]) if not skip_validation: self.validate_consistency()", - "docstring": "This may create dummy nodes if they don't yet exist. If , validate_consistency() should be called afterward.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\graph.py", - "ast_data": "FunctionDef name:add_dependency arguments arg:self arg:migration arg:child arg:parent arg:skip_validation If Compare op:NotIn Assign If Compare op:NotIn Assign If" - }, - { - "library": "pandas", - "name": "__delitem__", - "source_code": "def __delitem__(self, key: _KT) -> None: for mapping in self.maps: if key in mapping: del mapping[key] return raise KeyError(key)", - "docstring": "Raises ------ KeyError If doesn't exist.", - "type": "method", - "file_path": "pandas\\pandas\\core\\computation\\scope.py", - "ast_data": "FunctionDef name:__delitem__ arguments arg:self arg:key type:_KT For If Compare op:In Return return:no Raise raises:KeyError(key)" - }, - { - "library": "pandas", - "name": "read_table", - "source_code": "def read_table(self, table_name: str, index_col: str | list[str] | None = None, coerce_float: bool = True, parse_dates = None, columns = None, schema: str | None = None, chunksize: int | None = None, dtype_backend: DtypeBackend | Literal['numpy'] = 'numpy') -> DataFrame | Iterator[DataFrame]: self.meta.reflect(bind = self.con, only = [table_name], views = True) table = SQLTable(table_name, self, index = index_col, schema = schema) if chunksize is not None: self.returns_generator = True return table.read(self.exit_stack, coerce_float = coerce_float, parse_dates = parse_dates, columns = columns, chunksize = chunksize, dtype_backend = dtype_backend)", - "docstring": "Read SQL database table into a DataFrame. Parameters ---------- table_name : str Name of SQL table in database. index_col : string, optional, default: None Column to set as index. 
coerce_float : bool, default True Attempts to convert values of non-string, non-numeric objects (like decimal.Decimal) to floating point. This can result in loss of precision. parse_dates : list or dict, default: None - List of column names to parse as dates. - Dict of `pandas.to_datetimechunksizeDataFrameDataFrameArrowDtypeDataFrame` .. versionadded:: 2.0 Returns ------- DataFrame See Also -------- pandas.read_sql_table SQLDatabase.read_query", - "type": "method", - "file_path": "pandas\\pandas\\io\\sql.py", - "ast_data": "FunctionDef name:read_table arguments arg:self arg:table_name type:str arg:index_col type:str | list[str] | None arg:coerce_float type:bool arg:parse_dates arg:columns arg:schema type:str | None arg:chunksize type:int | None arg:dtype_backend type:DtypeBackend | Literal['numpy'] Assign Call call:SQLTable If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, shape, dtype = dtypes.float32, name = None): self._shape = tensor_shape.TensorShape(shape) self._dtype = dtypes.as_dtype(dtype) self._name = name", - "docstring": "Creates a TensorSpec. Args: shape: Value convertible to . The shape of the tensor. dtype: Value convertible to . The type of the tensor values. name: Optional name for the Tensor. Raises: TypeError: If shape is not convertible to a , or dtype is not convertible to a .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:shape arg:dtype arg:name Assign Call call:TensorShape Assign Call call:as_dtype Assign" - }, - { - "library": "authlib", - "name": "refresh_token", - "source_code": "def refresh_token(self, url = None, refresh_token = None, body = '', auth = None, headers = None, **kwargs): session_kwargs = self._extract_session_request_params(kwargs) refresh_token = refresh_token or self.token.get('refresh_token') if 'scope' not in kwargs and self.scope: kwargs['scope'] = self.scope body = prepare_token_request('refresh_token', body, refresh_token = refresh_token, **kwargs) if headers is None: headers = DEFAULT_HEADERS.copy() if url is None: url = self.metadata.get('token_endpoint') for hook in self.compliance_hook['refresh_token_request']: url, headers, body = hook(url, headers, body) if auth is None: auth = self.client_auth(self.token_endpoint_auth_method) return self._refresh_token(url, refresh_token = refresh_token, body = body, headers = headers, auth = auth, **session_kwargs)", - "docstring": "Fetch a new access token using a refresh token. :param url: Refresh Token endpoint, must be HTTPS. :param refresh_token: The refresh_token to use. :param body: Optional application/x-www-form-urlencoded body to add the include in the token request. Prefer kwargs over body. :param auth: An auth tuple or method as accepted by requests. :param headers: Dict to default request headers with. 
:return: A :class: object (a dict too).", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\client.py", - "ast_data": "FunctionDef name:refresh_token arguments arg:self arg:url arg:refresh_token arg:body arg:auth arg:headers kwarg:kwargs Assign Call call:_extract_session_request_params Assign BoolOp Call call:get If BoolOp Compare op:NotIn Assign Assign Call call:prepare_token_request If Compare op:Is Assign Call call:copy If Compare op:Is Assign Call call:get For Assign Call call:hook If Compare op:Is Assign Call call:client_auth Return return:yes" - }, - { - "library": "scikit-learn", - "name": "transform", - "source_code": "def transform(self, X): check_is_fitted(self) X = validate_data(self, X, reset = False) X = X - self.mean_ U = ridge_regression(self.components_.T, X.T, self.ridge_alpha, solver = 'cholesky') return U", - "docstring": "Least Squares projection of the data onto the sparse components. To avoid instability issues in case the system is under-determined, regularization can be applied (Ridge regression) via the parameter. Note that Sparse PCA components orthogonality is not enforced as in PCA hence one cannot use a simple linear projection. Parameters ---------- X : ndarray of shape (n_samples, n_features) Test data to be transformed, must have the same number of features as the data used to train the model. Returns ------- X_new : ndarray of shape (n_samples, n_components) Transformed data.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:X Assign Call call:validate_data Assign Assign Call call:ridge_regression Return return:yes" - }, - { - "library": "tensorflow", - "name": "check_generator_arguments", - "source_code": "def check_generator_arguments(y = None, sample_weight = None, validation_split = None): if y is not None: raise ValueError('`y` argument is not supported when data isa generator or Sequence instance. Instead pass targets as the second element of the generator.') if sample_weight is not None: raise ValueError('`sample_weight` argument is not supported when data isa generator or Sequence instance. Instead pass sample weights as the third element of the generator.') if validation_split: raise ValueError('If your data is in the form of a Python generator, you cannot use `validation_split`.')", - "docstring": "Validates arguments passed when using a generator.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", - "ast_data": "FunctionDef name:check_generator_arguments arguments arg:y arg:sample_weight arg:validation_split If Compare op:IsNot Raise raises:ValueError('`y` argument is not supported when data isa generator or Sequence instance. Instead pass targets as the second element of the generator.') If Compare op:IsNot Raise raises:ValueError('`sample_weight` argument is not supported when data isa generator or Sequence instance. Instead pass sample weights as the third element of the generator.') If Raise raises:ValueError('If your data is in the form of a Python generator, you cannot use `validation_split`.')" - }, - { - "library": "pytorch", - "name": "is_closed", - "source_code": "@abstractmethod def is_closed(self) -> bool: pass", - "docstring": "Check whether the rendezvous has been closed. A closed rendezvous means all future attempts to re-rendezvous within same job will fail. `set_closed` have semantics of eventual propagation and should not be used for synchronization. 
The intention is that if at least one node decides the job is finished, it will close the rendezvous, and other nodes will soon observe this and stop running as well.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\api.py", - "ast_data": "FunctionDef name:is_closed arguments arg:self" - }, - { - "library": "scipy", - "name": "names", - "source_code": "def names(self): return list(self._attributes)", - "docstring": "Return the list of attribute names. Returns ------- attrnames : list of str The attribute names.", - "type": "method", - "file_path": "scipy\\scipy\\io\\arff\\_arffread.py", - "ast_data": "FunctionDef name:names arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "maxpool2d_inference_rule", - "source_code": "@register_inference_rule(torch.nn.MaxPool2d) def maxpool2d_inference_rule(n: Node, module_instance): assert isinstance(n.args[0], Node) if n.args[0].type = = Dyn and isinstance(n.type, TensorType): n.args[0].type = expand_to_tensor_dim(n.args[0].type, len(n.type.__args__)) if isinstance(n.args[0].type, TensorType): output = maxpool2d_check(n.args[0].type, module_instance) n.type = get_greatest_upper_bound(output, n.type) return n.type", - "docstring": "Given a MaxPool2D instance and a node check the following conditions: - Input size matches size 3 or 4 - Current node type is consistent with the output type we will calculate - Input size matches output size and the last two dimensions of the output are w_out and h_out. The remaining dimensions are the same as the input - Our final result is the greatest upper bound of the output we calculate and the current node type.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", - "ast_data": "FunctionDef name:maxpool2d_inference_rule arguments arg:n type:Node arg:module_instance Call call:register_inference_rule If BoolOp Compare op:Eq Call call:isinstance Assign Call call:expand_to_tensor_dim If Call call:isinstance Assign Call call:maxpool2d_check Assign Call call:get_greatest_upper_bound Return return:yes" - }, - { - "library": "pytorch", - "name": "chain", - "source_code": "@compatibility(is_backward_compatible = False) def chain(*op_support: OperatorSupportBase) -> OperatorSupportBase: def _chain(submods, node) -> bool: return all((x.is_node_supported(submods, node) for x in op_support)) return create_op_support(_chain)", - "docstring": "Combines a sequence of instances to form a single instance by evaluating each input instance, and returns False if any of it reports False.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py", - "ast_data": "FunctionDef name:chain arguments vararg:op_support Call call:compatibility FunctionDef name:_chain arguments arg:submods arg:node Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "nodes_first", - "source_code": "def nodes_first(nodes: list[torch.fx.Node], node_call_back = None) -> Optional[torch.fx.Node]: ret = nodes_filter(nodes, node_call_back if node_call_back else lambda node: True) if len(ret) > 0: return ret[0] return None", - "docstring": "Returns the first node that matches the node_call_back. If no node matches, returns None. 
When node_call_back is None, returns the first node in the node list.", - "type": "function", - "file_path": "pytorch\\torch\\_export\\utils.py", - "ast_data": "FunctionDef name:nodes_first arguments arg:nodes type:list[torch.fx.Node] arg:node_call_back Assign Call call:nodes_filter If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "auth_code", - "source_code": "def auth_code(self, target): return capi.get_auth_code(self.ptr, target if target is None else force_bytes(target))", - "docstring": "Return the authority code for the given string target node.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", - "ast_data": "FunctionDef name:auth_code arguments arg:self arg:target Return return:yes" - }, - { - "library": "tensorflow", - "name": "main_op_with_restore", - "source_code": "@tf_export(v1 = ['saved_model.main_op_with_restore', 'saved_model.main_op.main_op_with_restore']) @deprecation.deprecated(None, _DEPRECATION_MSG) def main_op_with_restore(restore_op_name): with ops.control_dependencies([main_op()]): main_op_with_restore = control_flow_ops.group(restore_op_name) return main_op_with_restore", - "docstring": "Returns a main op to init variables, tables and restore the graph. Returns the main op including the group of ops that initializes all variables, initialize local variables, initialize all tables and the restore op name. Args: restore_op_name: Name of the op to use to restore the graph. Returns: The set of ops to be run as part of the main op upon the load operation.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\main_op_impl.py", - "ast_data": "FunctionDef name:main_op_with_restore arguments arg:restore_op_name Call call:tf_export Call call:deprecated With Assign Call call:group Return return:yes" - }, - { - "library": "pytorch", - "name": "after_fork", - "source_code": "def after_fork(): _pool_set.clear() AsyncCompile.process_pool.cache_clear()", - "docstring": "Reset pools to initial state without shutting them down", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\async_compile.py", - "ast_data": "FunctionDef name:after_fork arguments" - }, - { - "library": "matplotlib", - "name": "get_transform", - "source_code": "def get_transform(self): if self._transform is None: self._transform = self.axes.transData elif not isinstance(self._transform, mtransforms.Transform) and hasattr(self._transform, '_as_mpl_transform'): self._transform = self._transform._as_mpl_transform(self.axes) return self._transform", - "docstring": "Return the instance used by this ContourSet.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\contour.py", - "ast_data": "FunctionDef name:get_transform arguments arg:self If Compare op:Is Assign If BoolOp Call call:hasattr Assign Call call:_as_mpl_transform Return return:yes" - }, - { - "library": "mongo", - "name": "ok_for_legacy", - "source_code": "@property def ok_for_legacy(self) -> bool: return self.level is None or self.level = = 'local'", - "docstring": "Return `` if this read concern is compatible with old wire protocol versions.", - "type": "method", - "file_path": "mongo\\pymongo\\read_concern.py", - "ast_data": "FunctionDef name:ok_for_legacy arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "HeartbeatLogger", - "source_code": "class HeartbeatLogger(monitoring.ServerHeartbeatListener): def started(self, event: monitoring.ServerHeartbeatStartedEvent) -> None: 
logging.info(f'Heartbeat sent to server {event.connection_id}') def succeeded(self, event: monitoring.ServerHeartbeatSucceededEvent) -> None: logging.info(f'Heartbeat to server {event.connection_id} succeeded with reply {event.reply.document}') def failed(self, event: monitoring.ServerHeartbeatFailedEvent) -> None: logging.warning(f'Heartbeat to server {event.connection_id} failed with error {event.reply}')", - "docstring": "A simple listener that logs server heartbeat events. Listens for :class:, :class:, and :class: events and logs them at the severity level using :mod:. .. versionadded:: 3.11", - "type": "class", - "file_path": "mongo\\pymongo\\event_loggers.py", - "ast_data": "ClassDef name:HeartbeatLogger FunctionDef name:started arguments arg:self arg:event type:monitoring.ServerHeartbeatStartedEvent FunctionDef name:succeeded arguments arg:self arg:event type:monitoring.ServerHeartbeatSucceededEvent FunctionDef name:failed arguments arg:self arg:event type:monitoring.ServerHeartbeatFailedEvent" - }, - { - "library": "tensorflow", - "name": "graph_wrapped_for_higher_order_tape_gradients", - "source_code": "def graph_wrapped_for_higher_order_tape_gradients(graph): while graph is not None: if 'cflow_gradient_wrapper' in getattr(graph, 'name', ''): return True graph = getattr(graph, 'outer_graph', None) return False", - "docstring": "Check if is wrapped by .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py", - "ast_data": "FunctionDef name:graph_wrapped_for_higher_order_tape_gradients arguments arg:graph While Compare op:IsNot If Compare op:In Return return:yes Assign Call call:getattr Return return:yes" - }, - { - "library": "tensorflow", - "name": "build_nccl_then_shuffle", - "source_code": "def build_nccl_then_shuffle(input_tensors, gather_devices, nccl_red_op, shuffle_red_op, un_op = None): def upper_level_f(x): return build_shuffle_all_reduce(x, gather_devices, shuffle_red_op, un_op) return _build_nccl_hybrid(input_tensors, nccl_red_op, upper_level_f)", - "docstring": "Construct hybrid of NCCL within workers, Shuffle across workers.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", - "ast_data": "FunctionDef name:build_nccl_then_shuffle arguments arg:input_tensors arg:gather_devices arg:nccl_red_op arg:shuffle_red_op arg:un_op FunctionDef name:upper_level_f arguments arg:x Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "make_multi", - "source_code": "def make_multi(self, geom_type, model_field): return geom_type.num in self.MULTI_TYPES and model_field.__class__.__name__ = = 'Multi%s' % geom_type.django", - "docstring": "Given the OGRGeomType for a geometry and its associated GeometryField, determine whether the geometry should be turned into a GeometryCollection.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py", - "ast_data": "FunctionDef name:make_multi arguments arg:self arg:geom_type arg:model_field Return return:yes" - }, - { - "library": "algorithms", - "name": "rotate_right", - "source_code": "def rotate_right(head, k): if not head or not head.next: return head current = head length = 1 while current.next: current = current.next length + = 1 current.next = head k = k % length for i in range(length - k): current = current.next head = current.next current.next = None return head", - "docstring": ":type head: ListNode :type k: int :rtype: ListNode", - "type": "function", - "file_path": 
"algorithms\\algorithms\\linkedlist\\rotate_list.py", - "ast_data": "FunctionDef name:rotate_right arguments arg:head arg:k If BoolOp Return return:yes Assign Assign While Assign Assign Assign For Call call:range Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "local_tensor", - "source_code": "def local_tensor(self) -> torch.Tensor: if len(self.local_shards()) ! = 1: raise NotImplementedError('Only single local shard is supported.') return self.local_shards()[0].tensor", - "docstring": "Return local tensor for a sharded_tensor. For now we only support single local shard. Returns: A :class: of the local shard.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py", - "ast_data": "FunctionDef name:local_tensor arguments arg:self If Compare op:NotEq Raise raises:NotImplementedError('Only single local shard is supported.') Return return:yes" - }, - { - "library": "django", - "name": "check_envelope", - "source_code": "def check_envelope(result, func, cargs, offset = -1): return ptr_byref(cargs, offset)", - "docstring": "Check a function that returns an OGR Envelope by reference.", - "type": "function", - "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\errcheck.py", - "ast_data": "FunctionDef name:check_envelope arguments arg:result arg:func arg:cargs arg:offset Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, indices, values, dense_shape): with ops.name_scope(None, 'SparseTensor', [indices, values, dense_shape]): indices = ops.convert_to_tensor(indices, name = 'indices', dtype = dtypes.int64) values = ops.convert_to_tensor(values, name = 'values') dense_shape = ops.convert_to_tensor(dense_shape, name = 'dense_shape', dtype = dtypes.int64) dense_shape_default = tensor_util.constant_value_as_shape(dense_shape) self._indices = indices self._values = values self._dense_shape = dense_shape self._dense_shape_default = dense_shape_default indices_shape = indices.shape.with_rank(2) values_shape = values.shape.with_rank(1) dense_shape_shape = dense_shape.shape.with_rank(1) indices_shape.dims[0].assert_is_compatible_with(values_shape.dims[0]) indices_shape.dims[1].assert_is_compatible_with(dense_shape_shape.dims[0])", - "docstring": "Creates a . Args: indices: A 2-D int64 tensor of shape . values: A 1-D tensor of any type and shape . dense_shape: A 1-D int64 tensor of shape . 
Raises: ValueError: When building an eager SparseTensor if is unknown or contains unknown elements (None or -1).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:indices arg:values arg:dense_shape With Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign Call call:constant_value_as_shape Assign Assign Assign Assign Assign Call call:with_rank Assign Call call:with_rank Assign Call call:with_rank" - }, - { - "library": "matplotlib", - "name": "back", - "source_code": "def back(self): self._pos = max(self._pos - 1, 0) return self()", - "docstring": "Move the position back and return the current element.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", - "ast_data": "FunctionDef name:back arguments arg:self Assign Call call:max Return return:yes" - }, - { - "library": "algorithms", - "name": "__str__", - "source_code": "def __str__(self): resp = '' for i in range(self.row): for j in range(self.col): resp + = ' {0} '.format(self.board[i][j]) resp + = '\\n' return resp", - "docstring": "[summary] Generates a board representation as string. Returns: [str] -- [board representation]", - "type": "method", - "file_path": "algorithms\\algorithms\\dfs\\sudoku_solver.py", - "ast_data": "FunctionDef name:__str__ arguments arg:self Assign For Call call:range For Call call:range Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, X, y, sample_weight = None, check_input = True): super()._fit(X, y, sample_weight = sample_weight, check_input = check_input) return self", - "docstring": "Build a decision tree classifier from the training set (X, y). Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Internally, it will be converted to ``. y : array-like of shape (n_samples,) or (n_samples, n_outputs) The target values (class labels) as integers or strings. sample_weight : array-like of shape (n_samples,), default=None Sample weights. If None, then samples are equally weighted. Splits that would create child nodes with net zero or negative weight are ignored while searching for a split in each node. Splits are also ignored if they would result in any single class carrying a negative weight in either child node. check_input : bool, default=True Allow to bypass several input checking. Don't use this parameter unless you know what you're doing. Returns ------- self : DecisionTreeClassifier Fitted estimator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\tree\\_classes.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y arg:sample_weight arg:check_input Call call:_fit_context Return return:yes" - }, - { - "library": "tensorflow", - "name": "assert_cardinality", - "source_code": "@tf_export('data.experimental.assert_cardinality') def assert_cardinality(expected_cardinality): def _apply_fn(dataset): return _AssertCardinalityDataset(dataset, expected_cardinality) return _apply_fn", - "docstring": "Asserts the cardinality of the input dataset. NOTE: The following assumes that \"examples.tfrecord\" contains 42 records. 
>>> dataset = tf.data.TFRecordDataset(\"examples.tfrecord\") >>> cardinality = tf.data.experimental.cardinality(dataset) >>> print((cardinality == tf.data.experimental.UNKNOWN_CARDINALITY).numpy()) True >>> dataset = dataset.apply(tf.data.experimental.assert_cardinality(42)) >>> print(tf.data.experimental.cardinality(dataset).numpy()) 42 Args: expected_cardinality: The expected cardinality of the input dataset. Returns: A transformation function, which can be passed to . Raises: FailedPreconditionError: The assertion is checked at runtime (when iterating the dataset) and an error is raised if the actual and expected cardinality differ.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\cardinality.py", - "ast_data": "FunctionDef name:assert_cardinality arguments arg:expected_cardinality Call call:tf_export FunctionDef name:_apply_fn arguments arg:dataset Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "get_server_by_address", - "source_code": "def get_server_by_address(self, address: _Address) -> Optional[Server]: return self._servers.get(address)", - "docstring": "Get a Server or None. Returns the current version of the server immediately, even if it's Unknown or absent from the topology. Only use this in unittests. In driver code, use select_server_by_address, since then you're assured a recent view of the server's type and wire protocol version.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\topology.py", - "ast_data": "FunctionDef name:get_server_by_address arguments arg:self arg:address type:_Address Return return:yes" - }, - { - "library": "django", - "name": "close_rings", - "source_code": "def close_rings(self): capi.geom_close_rings(self.ptr)", - "docstring": "If there are any rings within this geometry that have not been closed, this routine will do so by adding the starting point at the end.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", - "ast_data": "FunctionDef name:close_rings arguments arg:self" - }, - { - "library": "flexx", - "name": "lencode", - "source_code": "def lencode(x): if x < = 250: return spack(' Callable[[type[_ExtensionIndexT]], type[_ExtensionIndexT]]: def wrapper(cls: type[_ExtensionIndexT]) -> type[_ExtensionIndexT]: for name in names: meth = _inherit_from_data(name, delegate, cache = cache, wrap = wrap) setattr(cls, name, meth) return cls return wrapper", - "docstring": "Class decorator to pin attributes from an ExtensionArray to a Index subclass. 
Parameters ---------- names : List[str] delegate : class cache : bool, default False wrap : bool, default False Whether to wrap the inherited result in an Index.", - "type": "function", - "file_path": "pandas\\pandas\\core\\indexes\\extension.py", - "ast_data": "FunctionDef name:inherit_names arguments arg:names type:list[str] arg:delegate type:type arg:cache type:bool arg:wrap type:bool FunctionDef name:wrapper arguments arg:cls type:type[_ExtensionIndexT] For Assign Call call:_inherit_from_data Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "dot", - "source_code": "def dot(self, V): assert V.shape = = (self.m,) return np.bincount(self.rows, weights = self.vals * V[self.cols], minlength = self.m)", - "docstring": "Dot product of self by a vector *V* in sparse-dense to dense format *V* dense vector of shape (self.m,).", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triinterpolate.py", - "ast_data": "FunctionDef name:dot arguments arg:self arg:V Return return:yes" - }, - { - "library": "tensorflow", - "name": "deserialize_feature_columns", - "source_code": "def deserialize_feature_columns(configs, custom_objects = None): columns_by_name = {} return [deserialize_feature_column(c, custom_objects, columns_by_name) for c in configs]", - "docstring": "Deserializes a list of FeatureColumns configs. Returns a list of FeatureColumns given a list of config dicts acquired by . Args: configs: A list of Dicts with the serialization of feature columns acquired by . custom_objects: A Dict from custom_object name to the associated keras serializable objects (FeatureColumns, classes or functions). Returns: FeatureColumn objects corresponding to the input configs. Raises: ValueError if called with input that is not a list of FeatureColumns.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\serialization.py", - "ast_data": "FunctionDef name:deserialize_feature_columns arguments arg:configs arg:custom_objects Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "mutatedy", - "source_code": "def mutatedy(self): return self._points[0, 1] ! = self._points_orig[0, 1] or self._points[1, 1] ! = self._points_orig[1, 1]", - "docstring": "Return whether the y-limits have changed since init.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:mutatedy arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "check_libs2", - "source_code": "def check_libs2(self, lib_dirs, libs, opt_libs = []): exts = self.library_extensions() info = self._check_libs(lib_dirs, libs, opt_libs, exts) if not info: log.info(' libraries %s not found in %s', ', '.join(libs), lib_dirs) return info", - "docstring": "If static or shared libraries are available then return their info dictionary. Checks each library for shared or static.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\system_info.py", - "ast_data": "FunctionDef name:check_libs2 arguments arg:self arg:lib_dirs arg:libs arg:opt_libs Assign Call call:library_extensions Assign Call call:_check_libs If Return return:yes" - }, - { - "library": "django", - "name": "cycle_key", - "source_code": "def cycle_key(self): self.save()", - "docstring": "Keep the same data but with a new key. 
Call save() and it will automatically save a cookie with a new key at the end of the request.", - "type": "method", - "file_path": "django\\django\\contrib\\sessions\\backends\\signed_cookies.py", - "ast_data": "FunctionDef name:cycle_key arguments arg:self" - }, - { - "library": "scipy", - "name": "p_max", - "source_code": "def p_max(self, n: int) -> int: return self._post_padding(n)[1]", - "docstring": "Index of first non-overlapping upper time slice for sample input. Note that center point t[p_max] = (p_max(n)-1) * is typically larger than last time index t[n-1] == (-1) * . The upper border of samples indexes covered by the window slices is given by . Furthermore, does not denote the number of slices since is typically less than zero. A detailed example is provided in the :ref: section of the :ref:. See Also -------- k_min: The smallest possible signal index. k_max: First sample index after signal end not touched by a time slice. p_min: The smallest possible slice index. p_num: Number of time slices, i.e., - . p_range: Determine and validate slice index range. ShortTimeFFT: Class this method belongs to.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", - "ast_data": "FunctionDef name:p_max arguments arg:self arg:n type:int Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, coord, timer_interval_secs, target = None, args = None, kwargs = None): if not isinstance(coord, Coordinator): raise ValueError(\"'coord' argument must be a Coordinator: %s\" % coord) super(LooperThread, self).__init__() self.daemon = True self._coord = coord self._timer_interval_secs = timer_interval_secs self._target = target if self._target: self._args = args or () self._kwargs = kwargs or {} elif args or kwargs: raise ValueError(\"'args' and 'kwargs' argument require that you also pass 'target'\") self._coord.register_thread(self)", - "docstring": "Create a LooperThread. Args: coord: A Coordinator. timer_interval_secs: Time boundaries at which to call Run(), or None if it should be called back to back. target: Optional callable object that will be executed in the thread. args: Optional arguments to pass to when calling it. kwargs: Optional keyword arguments to pass to when calling it. 
Raises: ValueError: If one of the arguments is invalid.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\coordinator.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:coord arg:timer_interval_secs arg:target arg:args arg:kwargs If Raise raises:ValueError(\"'coord' argument must be a Coordinator: %s\" % coord) Assign Assign Assign Assign If Assign BoolOp Assign BoolOp If BoolOp Raise raises:ValueError(\"'args' and 'kwargs' argument require that you also pass 'target'\")" - }, - { - "library": "pytorch", - "name": "load", - "source_code": "def load(self, name: str, index: Expr, mode: Any = None) -> CSEVariable: return self.create_cse_var(name, bounds = ValueRanges.unknown())", - "docstring": "Mock load function for memory planning to optimize allocations properly.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", - "ast_data": "FunctionDef name:load arguments arg:self arg:name type:str arg:index type:Expr arg:mode type:Any Return return:yes" - }, - { - "library": "tensorflow", - "name": "History", - "source_code": "class History(Callback): def __init__(self): super(History, self).__init__() self.history = {} def on_train_begin(self, logs = None): self.epoch = [] def on_epoch_end(self, epoch, logs = None): logs = logs or {} self.epoch.append(epoch) for k, v in logs.items(): self.history.setdefault(k, []).append(v) self.model.history = self", - "docstring": "Callback that records events into a object. This callback is automatically applied to every Keras model. The object gets returned by the method of models. Example: >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)]) >>> model.compile(tf.keras.optimizers.SGD(), loss='mse') >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), ... epochs=10, verbose=1) >>> print(history.params) {'verbose': 1, 'epochs': 10, 'steps': 1} >>> # check the keys of history object >>> print(history.history.keys()) dict_keys(['loss'])", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "ClassDef name:History FunctionDef name:__init__ arguments arg:self Assign FunctionDef name:on_train_begin arguments arg:self arg:logs Assign FunctionDef name:on_epoch_end arguments arg:self arg:epoch arg:logs Assign BoolOp For Call call:items Assign" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, X, y = None): if y is None: X = validate_data(self, X, accept_sparse = ['csr', 'csc']) else: X, y = validate_data(self, X, y, accept_sparse = ['csr', 'csc'], multi_output = True) self._check_params(X, y) score_func_ret = self.score_func(X, y) if isinstance(score_func_ret, (list, tuple)): self.scores_, self.pvalues_ = score_func_ret self.pvalues_ = np.asarray(self.pvalues_) else: self.scores_ = score_func_ret self.pvalues_ = None self.scores_ = np.asarray(self.scores_) return self", - "docstring": "Run score function on (X, y) and get the appropriate features. Parameters ---------- X : array-like of shape (n_samples, n_features) The training input samples. y : array-like of shape (n_samples,) or None The target values (class labels in classification, real numbers in regression). If the selector is unsupervised then can be set to . 
Returns ------- self : object Returns the instance itself.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\feature_selection\\_univariate_selection.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Call call:_fit_context If Compare op:Is Assign Call call:validate_data Assign Call call:validate_data Assign Call call:score_func If Call call:isinstance Assign Assign Call call:asarray Assign Assign Assign Call call:asarray Return return:yes" - }, - { - "library": "mongo", - "name": "exists", - "source_code": "def exists(self, document_or_id: Optional[Any] = None, session: Optional[ClientSession] = None, **kwargs: Any) -> bool: _disallow_transactions(session) if kwargs: f = self._files.find_one(kwargs, ['_id'], session = session) else: f = self._files.find_one(document_or_id, ['_id'], session = session) return f is not None", - "docstring": "Check if a file exists in this instance of :class:. The file to check for can be specified by the value of its `exists~pymongo.client_session.ClientSession` parameter.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:exists arguments arg:self arg:document_or_id type:Optional[Any] arg:session type:Optional[ClientSession] kwarg:kwargs If Assign Call call:find_one Assign Call call:find_one Return return:yes" - }, - { - "library": "tensorflow", - "name": "monitoring_helper", - "source_code": "def monitoring_helper(service_addr, duration_ms, monitoring_level, num_queries): if monitoring_level < = 0 or monitoring_level > 2: sys.exit('Please choose a monitoring level between 1 and 2.') for query in range(0, num_queries): res = profiler_client.monitor(service_addr, duration_ms, monitoring_level) print('Cloud TPU Monitoring Results (Sample ', query, '): \\n\\n', res)", - "docstring": "Helper function to print monitoring results. Helper function to print monitoring results for num_queries times. Args: service_addr: Address of the TPU profiler service. duration_ms: Duration of one monitoring sample in milliseconds. monitoring_level: An integer between 1 and 2. Level 2 is more verbose than level 1 and shows more metrics. num_queries: Number of monitoring samples to collect.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\profiler\\capture_tpu_profile.py", - "ast_data": "FunctionDef name:monitoring_helper arguments arg:service_addr arg:duration_ms arg:monitoring_level arg:num_queries If BoolOp Compare op:LtE Compare op:Gt For Call call:range Assign Call call:monitor" - }, - { - "library": "tensorflow", - "name": "add_event", - "source_code": "def add_event(self, event): if not self._closed: event_pb = event.SerializeToString() self._session.run(self._add_event_op, feed_dict = {self._event_placeholder: event_pb})", - "docstring": "Adds an event to the event file. Args: event: An protocol buffer.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py", - "ast_data": "FunctionDef name:add_event arguments arg:self arg:event If Assign Call call:SerializeToString" - }, - { - "library": "sphinx", - "name": "add_cell", - "source_code": "def add_cell(self, height: int, width: int) -> None: self.cell_id + = 1 for col in range(width): for row in range(height): assert self.cells[self.row + row, self.col + col] = = 0 self.cells[self.row + row, self.col + col] = self.cell_id", - "docstring": "Adds a new cell to a table. 
It will be located at current position: (``).", - "type": "method", - "file_path": "sphinx\\sphinx\\writers\\latex.py", - "ast_data": "FunctionDef name:add_cell arguments arg:self arg:height type:int arg:width type:int For Call call:range For Call call:range Assign" - }, - { - "library": "scipy", - "name": "isscalarlike", - "source_code": "def isscalarlike(x) -> bool: return np.isscalar(x) or (isdense(x) and x.ndim = = 0)", - "docstring": "Is x either a scalar, an array scalar, or a 0-dim array?", - "type": "function", - "file_path": "scipy\\scipy\\sparse\\_sputils.py", - "ast_data": "FunctionDef name:isscalarlike arguments arg:x Return return:yes" - }, - { - "library": "tensorflow", - "name": "__len__", - "source_code": "@abstractmethod def __len__(self): raise NotImplementedError", - "docstring": "Number of batch in the Sequence. Returns: The number of batches in the Sequence.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", - "ast_data": "FunctionDef name:__len__ arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, scaffold = None, master = '', config = None, checkpoint_dir = None, checkpoint_filename_with_path = None): self._checkpoint_dir = checkpoint_dir self._checkpoint_filename_with_path = checkpoint_filename_with_path self._scaffold = scaffold or Scaffold() self._session_manager = None self._master = master self._config = config", - "docstring": "Initializes a chief session creator. Args: scaffold: A used for gathering or building supportive ops. If not specified a default one is created. It's used to finalize the graph. master: representation of the TensorFlow master to use. config: proto used to configure the session. checkpoint_dir: A string. Optional path to a directory where to restore variables. checkpoint_filename_with_path: Full file name path to the checkpoint file.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:scaffold arg:master arg:config arg:checkpoint_dir arg:checkpoint_filename_with_path Assign Assign Assign BoolOp Call call:Scaffold Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "add_scalars", - "source_code": "def add_scalars(self, main_tag, tag_scalar_dict, global_step = None, walltime = None): torch._C._log_api_usage_once('tensorboard.logging.add_scalars') walltime = time.time() if walltime is None else walltime fw_logdir = self._get_file_writer().get_logdir() for tag, scalar_value in tag_scalar_dict.items(): fw_tag = fw_logdir + '/' + main_tag.replace('/', '_') + '_' + tag assert self.all_writers is not None if fw_tag in self.all_writers.keys(): fw = self.all_writers[fw_tag] else: fw = FileWriter(fw_tag, self.max_queue, self.flush_secs, self.filename_suffix) self.all_writers[fw_tag] = fw fw.add_summary(scalar(main_tag, scalar_value), global_step, walltime)", - "docstring": "Add many scalar data to summary. 
Args: main_tag (str): The parent name for the tags tag_scalar_dict (dict): Key-value pair storing the tag and corresponding values global_step (int): Global step value to record walltime (float): Optional override default walltime (time.time()) seconds after epoch of event Examples:: from torch.utils.tensorboard import SummaryWriter writer = SummaryWriter() r = 5 for i in range(100): writer.add_scalars('run_14h', {'xsinx':i*np.sin(i/r), 'xcosx':i*np.cos(i/r), 'tanx': np.tan(i/r)}, i) writer.close() # This call adds three values to the same scalar plot with the tag # 'run_14h' in TensorBoard's scalar section. Expected result: .. image:: _static/img/tensorboard/add_scalars.png :scale: 50 %", - "type": "method", - "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", - "ast_data": "FunctionDef name:add_scalars arguments arg:self arg:main_tag arg:tag_scalar_dict arg:global_step arg:walltime Assign Assign Call call:get_logdir For Call call:items Assign If Compare op:In Assign Assign Call call:FileWriter Assign" - }, - { - "library": "kornia", - "name": "get_translation_matrix2d", - "source_code": "def get_translation_matrix2d(translations: Tensor) -> Tensor: transform: Tensor = eye_like(3, translations)[:, : 2, :] transform[..., 2] + = translations transform_h = convert_affinematrix_to_homography(transform) return transform_h", - "docstring": "Compose translation matrix from the components. Args: translations: tensor containing the translation vector with shape :math:. Returns: the affine transformation matrix :math:. .. note:: This function is often used in conjunction with :func:, :func:.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py", - "ast_data": "FunctionDef name:get_translation_matrix2d arguments arg:translations type:Tensor Assign Call call:convert_affinematrix_to_homography Return return:yes" - }, - { - "library": "tensorflow", - "name": "reason", - "source_code": "@staticmethod def reason(op_idx, details): return '%d %s' % (op_idx, details)", - "docstring": "Returns reason why the Op at op_idx is traced or not.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", - "ast_data": "FunctionDef name:reason arguments arg:op_idx arg:details Return return:yes" - }, - { - "library": "pandas", - "name": "ColumnNullType", - "source_code": "class ColumnNullType(enum.IntEnum): NON_NULLABLE = 0 USE_NAN = 1 USE_SENTINEL = 2 USE_BITMASK = 3 USE_BYTEMASK = 4", - "docstring": "Integer enum for null type representation. Attributes ---------- NON_NULLABLE : int Non-nullable column. USE_NAN : int Use explicit float NaN value. USE_SENTINEL : int Sentinel value besides NaN/NaT. USE_BITMASK : int The bit is set/unset representing a null on a certain position. 
USE_BYTEMASK : int The byte is set/unset representing a null on a certain position.", - "type": "class", - "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", - "ast_data": "ClassDef name:ColumnNullType Assign Assign Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "create", - "source_code": "@property def create(self): if not self._in_graph_mode: raise RuntimeError('This operation is not supported when eager execution is enabled.') return self._initializer_op", - "docstring": "The op responsible for initializing this variable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:create arguments arg:self If Raise raises:RuntimeError('This operation is not supported when eager execution is enabled.') Return return:yes" - }, - { - "library": "scikit-learn", - "name": "transform", - "source_code": "def transform(self, K, copy = True): check_is_fitted(self) xp, _ = get_namespace(K) K = validate_data(self, K, copy = copy, force_writeable = True, dtype = _array_api.supported_float_dtypes(xp), reset = False) K_pred_cols = (xp.sum(K, axis = 1) / self.K_fit_rows_.shape[0])[:, None] K - = self.K_fit_rows_ K - = K_pred_cols K + = self.K_fit_all_ return K", - "docstring": "Center kernel matrix. Parameters ---------- K : ndarray of shape (n_samples1, n_samples2) Kernel matrix. copy : bool, default=True Set to False to perform inplace computation. Returns ------- K_new : ndarray of shape (n_samples1, n_samples2) Returns the instance itself.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\preprocessing\\_data.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:K arg:copy Assign Call call:get_namespace Assign Call call:validate_data Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "write_to_file", - "source_code": "def write_to_file(self, file_path): with gfile.Open(file_path, 'w') as f: for line in self._lines: f.write(line + '\\n')", - "docstring": "Write the object itself to file, in a plain format. The font_attr_segs and annotations are ignored. Args: file_path: (str) path of the file to write to.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:write_to_file arguments arg:self arg:file_path With For" - }, - { - "library": "tensorflow", - "name": "get_input_names", - "source_code": "def get_input_names(self): return self._input_names", - "docstring": "Returns keys to name inputs by. In case inputs provided were a list, tuple or single entry, we make up a key 'input_%d'. 
For dictionary case, we return a sorted list of keys.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", - "ast_data": "FunctionDef name:get_input_names arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "handle_default_options", - "source_code": "def handle_default_options(options): if options.settings: os.environ['DJANGO_SETTINGS_MODULE'] = options.settings if options.pythonpath: sys.path.insert(0, options.pythonpath)", - "docstring": "Include any default options that all commands should accept here so that ManagementUtility can handle them before searching for user commands.", - "type": "function", - "file_path": "django\\django\\core\\management\\base.py", - "ast_data": "FunctionDef name:handle_default_options arguments arg:options If Assign If" - }, - { - "library": "tensorflow", - "name": "converted_self", - "source_code": "def converted_self(self): raise NotImplementedError", - "docstring": "A copy of this Convertible to be modified during conversion. Returns: Implementations should return the copied instance, which in turn should be contained in converted_enclosing_graph(). This instance is the one that will be modified during conversion. Its main use will be in the implementations of convert_variable_to_constant().", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", - "ast_data": "FunctionDef name:converted_self arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "sphinx", - "name": "get_objects", - "source_code": "def get_objects(self) -> Iterable[tuple[str, str, str, str, str, int]]: return []", - "docstring": "Return an iterable of \"object descriptions\". Object descriptions are tuples with six items: `` Object should not show up in search at all.", - "type": "method", - "file_path": "sphinx\\sphinx\\domains\\__init__.py", - "ast_data": "FunctionDef name:get_objects arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "string_table", - "source_code": "def string_table(self): return self._string_table", - "docstring": "Returns a list of strings to store in pprof's string_table.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\profiler\\pprof_profiler.py", - "ast_data": "FunctionDef name:string_table arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_picker", - "source_code": "def set_picker(self, p): if not callable(p): self.set_pickradius(p) self._picker = p", - "docstring": "Set the event picker details for the line. Parameters ---------- p : float or callable[[Artist, Event], tuple[bool, dict]] If a float, it is used as the pick radius in points.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\lines.py", - "ast_data": "FunctionDef name:set_picker arguments arg:self arg:p If Assign" - }, - { - "library": "pytorch", - "name": "Scope", - "source_code": "@compatibility(is_backward_compatible = False) class Scope: def __init__(self, module_path: str, module_type: Any): super().__init__() self.module_path = module_path self.module_type = module_type", - "docstring": "Scope object that records the module path and the module type of a module. Scope is used to track the information of the module that contains a Node in a Graph of GraphModule. 
For example:: class Sub(torch.nn.Module): def forward(self, x): # This will be a call_method Node in GraphModule, # scope for this would be (module_path=\"sub\", module_type=Sub) return x.transpose(1, 2) class M(torch.nn.Module): def __init__(self) -> None: self.sub = Sub() def forward(self, x): # This will be a call_method Node as well, # scope for this would be (module_path=\"\", None) x = x.transpose(1, 2) x = self.sub(x) return x", - "type": "class", - "file_path": "pytorch\\torch\\fx\\proxy.py", - "ast_data": "ClassDef name:Scope Call call:compatibility FunctionDef name:__init__ arguments arg:self arg:module_path type:str arg:module_type type:Any Assign Assign" - }, - { - "library": "matplotlib", - "name": "set_facecolor", - "source_code": "def set_facecolor(self, color): self.patch.set_facecolor(color)", - "docstring": "Set the face color of the Figure rectangle. Parameters ---------- color : :mpltype:", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:set_facecolor arguments arg:self arg:color" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "@available_if(_estimator_has('decision_function', delegates = ('final_estimator_', 'final_estimator'))) def decision_function(self, X): check_is_fitted(self) return self.final_estimator_.decision_function(self.transform(X))", - "docstring": "Decision function for samples in using the final estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. Returns ------- decisions : ndarray of shape (n_samples,), (n_samples, n_classes), or (n_samples, n_classes * (n_classes-1) / 2) The decision function computed the final estimator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Call call:available_if Return return:yes" - }, - { - "library": "scipy", - "name": "Exponential", - "source_code": "class Exponential(Benchmark): change_dimensionality = True def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-1.0] * self.N, [1.0] * self.N)) self.global_optimum = [[0.0 for _ in range(self.N)]] self.fglob = -1.0 def fun(self, x, *args): self.nfev + = 1 return -exp(-0.5 * sum(x ** 2.0))", - "docstring": "Exponential [1] objective function. This class defines the Exponential global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Exponential}}(x) = -e^{-0.5 \\sum_{i=1}^n x_i^2} Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. 
TODO Jamil are missing a minus sign on fglob", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py", - "ast_data": "ClassDef name:Exponential Assign FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "pytorch", - "name": "add_benchmark_harness", - "source_code": "def add_benchmark_harness(self, output): if not config.benchmark_harness: return self.benchmark_compiled_module(output) output.writelines(['', '', 'if __name__ = = \"__main__\": ']) with output.indent(): output.writelines(['from torch._inductor.wrapper_benchmark import compiled_module_main', f\"compiled_module_main('{get_benchmark_name()}', benchmark_compiled_module)\"])", - "docstring": "Append a benchmark harness to generated code for debugging", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py", - "ast_data": "FunctionDef name:add_benchmark_harness arguments arg:self arg:output If Return return:no With" - }, - { - "library": "django", - "name": "metadata", - "source_code": "@metadata.setter def metadata(self, value): for domain, metadata in value.items(): domain = None if domain = = 'DEFAULT' else domain.encode() for meta_name, meta_value in metadata.items(): capi.set_ds_metadata_item(self._ptr, meta_name.encode(), meta_value.encode() if meta_value else None, domain)", - "docstring": "Set the metadata. Update only the domains that are contained in the value dictionary.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\raster\\base.py", - "ast_data": "FunctionDef name:metadata arguments arg:self arg:value For Call call:items Assign For Call call:items" - }, - { - "library": "authlib", - "name": "handle_response", - "source_code": "def handle_response(self, status, body, headers): raise NotImplementedError()", - "docstring": "Return HTTP response. Framework MUST implement this function.", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", - "ast_data": "FunctionDef name:handle_response arguments arg:self arg:status arg:body arg:headers Raise raises:NotImplementedError()" - }, - { - "library": "tensorflow", - "name": "get_flat_tensor_specs", - "source_code": "def get_flat_tensor_specs(element_spec): return list(itertools.chain.from_iterable((spec._flat_tensor_specs for spec in nest.flatten(element_spec))))", - "docstring": "Returns a list s for the element tensor representation. Args: element_spec: A nested structure of objects representing to element type specification. 
Returns: A list s for the element tensor representation.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py", - "ast_data": "FunctionDef name:get_flat_tensor_specs arguments arg:element_spec Return return:yes" - }, - { - "library": "tensorflow", - "name": "num_graph_execution_traces", - "source_code": "def num_graph_execution_traces(self): return len(self._graph_execution_trace_digests)", - "docstring": "Get the number of graph execution traces read so far.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", - "ast_data": "FunctionDef name:num_graph_execution_traces arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "assert_non_negative_v2", - "source_code": "@tf_export('debugging.assert_non_negative', v1 = []) @dispatch.add_dispatch_support def assert_non_negative_v2(x, message = None, summarize = None, name = None): return assert_non_negative(x = x, summarize = summarize, message = message, name = name)", - "docstring": "Assert the condition holds element-wise. This Op checks that holds for every element of . If is empty, this is trivially satisfied. If is not >= 0 everywhere, , as well as the first entries of are printed, and is raised. Args: x: Numeric . message: A string to prefix to the default message. summarize: Print this many entries of each tensor. name: A name for this operation (optional). Defaults to \"assert_non_negative\". Returns: Op raising unless is all non-negative. This can be used with inside of s to block followup computation until the check has executed. @compatibility(eager) returns None @end_compatibility Raises: InvalidArgumentError: if the check can be performed immediately and is False. The check can be performed immediately during eager execution or if is statically known.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\check_ops.py", - "ast_data": "FunctionDef name:assert_non_negative_v2 arguments arg:x arg:message arg:summarize arg:name Call call:tf_export Return return:yes" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "def __call__(self, y_true, y_pred, sample_weight = None): graph_ctx = tf_utils.graph_context_for_symbolic_tensors(y_true, y_pred, sample_weight) with backend.name_scope(self._name_scope), graph_ctx: if context.executing_eagerly(): call_fn = self.call else: call_fn = autograph.tf_convert(self.call, ag_ctx.control_status_ctx()) losses = call_fn(y_true, y_pred) return losses_utils.compute_weighted_loss(losses, sample_weight, reduction = self._get_reduction())", - "docstring": "Invokes the instance. Args: y_true: Ground truth values. shape = , except sparse loss functions such as sparse categorical crossentropy where shape = y_pred: The predicted values. shape = sample_weight: Optional acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If is a tensor of size , then the total loss for each sample of the batch is rescaled by the corresponding element in the vector. If the shape of is (or can be broadcasted to this shape), then each loss element of is scaled by the corresponding value of . (Note on: all loss functions reduce by 1 dimension, usually axis=-1.) Returns: Weighted loss float . If is , this has shape ; otherwise, it is scalar. (Note because all loss functions reduce by 1 dimension, usually axis=-1.) 
Raises: ValueError: If the shape of is invalid.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:y_true arg:y_pred arg:sample_weight Assign Call call:graph_context_for_symbolic_tensors With If Call call:executing_eagerly Assign Assign Call call:tf_convert Assign Call call:call_fn Return return:yes" - }, - { - "library": "django", - "name": "convert_value", - "source_code": "@cached_property def convert_value(self): field = self.output_field internal_type = field.get_internal_type() if internal_type = = 'FloatField': return lambda value, expression, connection: None if value is None else float(value) elif internal_type.endswith('IntegerField'): return lambda value, expression, connection: None if value is None else int(value) elif internal_type = = 'DecimalField': return lambda value, expression, connection: None if value is None else Decimal(value) return self._convert_value_noop", - "docstring": "Expressions provide their own converters because users have the option of manually specifying the output_field which may be a different type from the one the database returns.", - "type": "method", - "file_path": "django\\django\\db\\models\\expressions.py", - "ast_data": "FunctionDef name:convert_value arguments arg:self Assign Assign Call call:get_internal_type If Compare op:Eq Return return:yes If Call call:endswith Return return:yes If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "TensorShapeProtoToList", - "source_code": "def TensorShapeProtoToList(shape): return [dim.size for dim in shape.dim]", - "docstring": "Convert a TensorShape to a list. Args: shape: A TensorShapeProto. Returns: List of integers representing the dimensions of the tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_util.py", - "ast_data": "FunctionDef name:TensorShapeProtoToList arguments arg:shape Return return:yes" - }, - { - "library": "tensorflow", - "name": "inner_dim_sizes", - "source_code": "@property def inner_dim_sizes(self): return self._inner_dim_sizes", - "docstring": "The inner dimension sizes for this shape. Returns: A 1-D integer .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", - "ast_data": "FunctionDef name:inner_dim_sizes arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "createResolutionCallbackFromFrame", - "source_code": "def createResolutionCallbackFromFrame(frames_up: int = 0): frame = inspect.currentframe() i = 0 while i < frames_up + 1: assert frame is not None frame = frame.f_back i + = 1 assert frame is not None f_locals = frame.f_locals f_globals = frame.f_globals class env: def __getattr__(self, key): if key in f_locals: return f_locals[key] elif key in f_globals: return f_globals[key] elif key in dir(builtins): return getattr(builtins, key) return createResolutionCallbackFromEnv(env())", - "docstring": "Creates a function which, given a string variable name, returns the value of the variable in the scope of the caller of the function which called createResolutionCallbackFromFrame (by default). This is used to enable access in-scope Python variables inside TorchScript fragments. frames_up is number of additional frames to go up on the stack. The default value is 0, which correspond to the frame of the caller of createResolutionCallbackFromFrame. 
Also for example, if frames_up is set to 1, then the frame of the caller's caller of createResolutionCallbackFromFrame will be taken. For example, the following program prints 2:: def bar(): cb = createResolutionCallbackFromFrame(1) print(cb(\"foo\")) def baz(): foo = 2 bar() baz()", - "type": "function", - "file_path": "pytorch\\torch\\_jit_internal.py", - "ast_data": "FunctionDef name:createResolutionCallbackFromFrame arguments arg:frames_up type:int Assign Call call:currentframe Assign While Compare op:Lt Assign Assign Assign ClassDef name:env FunctionDef name:__getattr__ arguments arg:self arg:key If Compare op:In Return return:yes If Compare op:In Return return:yes If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "g", - "source_code": "def g(self): return self.data.hour % 12 or 12", - "docstring": "Hour, 12-hour format without leading zeros; i.e. '1' to '12'", - "type": "method", - "file_path": "django\\django\\utils\\dateformat.py", - "ast_data": "FunctionDef name:g arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "islower", - "source_code": "def islower(self): return islower(self)", - "docstring": "Returns true for each element if all cased characters in the string are lowercase and there is at least one cased character, false otherwise. See Also -------- char.islower", - "type": "method", - "file_path": "numpy\\numpy\\_core\\defchararray.py", - "ast_data": "FunctionDef name:islower arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "NotFoundError", - "source_code": "@tf_export('errors.NotFoundError') class NotFoundError(OpError): def __init__(self, node_def, op, message, *args): super(NotFoundError, self).__init__(node_def, op, message, NOT_FOUND, *args)", - "docstring": "Raised when a requested entity (e.g., a file or directory) was not found. For example, running the operation could raise if it receives the name of a file that does not exist.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", - "ast_data": "ClassDef name:NotFoundError Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:node_def arg:op arg:message vararg:args" - }, - { - "library": "authlib", - "name": "revoke_access_token", - "source_code": "def revoke_access_token(self, token, request): raise NotImplementedError()", - "docstring": "Revoke a token access in case an invalid client has been requested. Developers MUST implement this method in subclass:: def revoke_access_token(self, token, request): token.revoked = True token.save()", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc7592\\endpoint.py", - "ast_data": "FunctionDef name:revoke_access_token arguments arg:self arg:token arg:request Raise raises:NotImplementedError()" - }, - { - "library": "pytorch", - "name": "seed", - "source_code": "def seed() -> int: seed = default_generator.seed() import torch.cuda if not torch.cuda._is_in_bad_fork(): torch.cuda.manual_seed_all(seed) import torch.mps if not torch.mps._is_in_bad_fork(): torch.mps.manual_seed(seed) import torch.xpu if not torch.xpu._is_in_bad_fork(): torch.xpu.manual_seed_all(seed) _seed_custom_device(seed) return seed", - "docstring": "Sets the seed for generating random numbers to a non-deterministic random number on all devices. 
Returns a 64 bit number used to seed the RNG.", - "type": "function", - "file_path": "pytorch\\torch\\random.py", - "ast_data": "FunctionDef name:seed arguments Assign Call call:seed If If If Return return:yes" - }, - { - "library": "tensorflow", - "name": "scatter", - "source_code": "@tf_should_use.should_use_result def scatter(self, indices, value, name = None): return self._implementation.scatter(indices, value, name = name)", - "docstring": "Scatter the values of a in specific indices of a . Args: indices: A taking values in . If the is not dynamic, . value: (N+1)-D. Tensor of type . The Tensor to unpack. name: A name for the operation (optional). Returns: A new TensorArray object with flow that ensures the scatter occurs. Use this object for all subsequent operations. Raises: ValueError: if the shape inference fails.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\tensor_array_ops.py", - "ast_data": "FunctionDef name:scatter arguments arg:self arg:indices arg:value arg:name Return return:yes" - }, - { - "library": "scrapy", - "name": "remove_from_list", - "source_code": "def remove_from_list(self, name: _SettingsKeyT, item: Any) -> None: value: list[str] = self.getlist(name) if item not in value: raise ValueError(f'{item!r} not found in the {name} setting ({value!r}).') self.set(name, [v for v in value if v ! = item], self.getpriority(name) or 0)", - "docstring": "Remove *item* from the :class: setting with the specified *name*. If *item* is missing, raise :exc:. This change is applied regardless of the priority of the *name* setting. The setting priority is not affected by this change either.", - "type": "method", - "file_path": "scrapy\\scrapy\\settings\\__init__.py", - "ast_data": "FunctionDef name:remove_from_list arguments arg:self arg:name type:_SettingsKeyT arg:item type:Any If Compare op:NotIn Raise raises:ValueError(f'{item!r} not found in the {name} setting ({value!r}).')" - }, - { - "library": "virtualenv", - "name": "from_exe", - "source_code": "@classmethod def from_exe(cls, exe, app_data = None, raise_on_error = True, ignore_cache = False, resolve_to_host = True, env = None): from virtualenv.discovery.cached_py_info import from_exe env = os.environ if env is None else env proposed = from_exe(cls, app_data, exe, env = env, raise_on_error = raise_on_error, ignore_cache = ignore_cache) if isinstance(proposed, PythonInfo) and resolve_to_host: try: proposed = proposed._resolve_to_system(app_data, proposed) except Exception as exception: if raise_on_error: raise LOGGER.info('ignore %s due cannot resolve system due to %r', proposed.original_executable, exception) proposed = None return proposed", - "docstring": "Given a path to an executable get the python information.", - "type": "method", - "file_path": "virtualenv\\src\\virtualenv\\discovery\\py_info.py", - "ast_data": "FunctionDef name:from_exe arguments arg:cls arg:exe arg:app_data arg:raise_on_error arg:ignore_cache arg:resolve_to_host arg:env Assign Assign Call call:from_exe If BoolOp Call call:isinstance Try Assign Call call:_resolve_to_system ExceptHandler If Raise Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "finalize_prefix", - "source_code": "def finalize_prefix(self): old_prefix = self.prefix self.prefix = IndentedBuffer() super().finalize_prefix() for kernel in self._triton_call_wrappers.values(): self.prefix.writeline('\\n') kernel.generate(self) self.prefix.writeline('\\n') self.prefix.splice(old_prefix)", - "docstring": "Define the triton kernels now that 
autotuning is finished", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_wrapper_gpu.py", - "ast_data": "FunctionDef name:finalize_prefix arguments arg:self Assign Assign Call call:IndentedBuffer For Call call:values" - }, - { - "library": "pytorch", - "name": "AOTAutogradCacheEntry", - "source_code": "class AOTAutogradCacheEntry(GenericAOTAutogradCacheEntry[CompiledForward, CompiledBackward]): pass", - "docstring": "Regular AOTAutogradCacheEntry: saves the forward/backward FxGraphCache keys and looks them up in FxGraphCache on load", - "type": "class", - "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", - "ast_data": "ClassDef name:AOTAutogradCacheEntry" - }, - { - "library": "sphinx", - "name": "resolve_xref", - "source_code": "def resolve_xref(self, env: BuildEnvironment, fromdocname: str, builder: Builder, typ: str, target: str, node: pending_xref, contnode: Element) -> nodes.reference | None: pass", - "docstring": "Resolve the pending_xref *node* with the given *typ* and *target*. This method should return a new node, to replace the xref node, containing the *contnode* which is the markup content of the cross-reference. If no resolution can be found, None can be returned; the xref node will then given to the :event: event, and if that yields no resolution, replaced by *contnode*. The method can also raise :exc: to suppress the :event: event being emitted.", - "type": "method", - "file_path": "sphinx\\sphinx\\domains\\__init__.py", - "ast_data": "FunctionDef name:resolve_xref arguments arg:self arg:env type:BuildEnvironment arg:fromdocname type:str arg:builder type:Builder arg:typ type:str arg:target type:str arg:node type:pending_xref arg:contnode type:Element" - }, - { - "library": "cherrypy", - "name": "PerpetualTimer", - "source_code": "class PerpetualTimer(threading.Timer): def __init__(self, *args, **kwargs): self.bus = kwargs.pop('bus', None) super(PerpetualTimer, self).__init__(*args, **kwargs) def run(self): while True: self.finished.wait(self.interval) if self.finished.isSet(): return try: self.function(*self.args, **self.kwargs) except Exception: if self.bus: self.bus.log('Error in perpetual timer thread function %r.' % self.function, level = 40, traceback = True) raise", - "docstring": "A responsive subclass of threading.Timer whose run() method repeats. 
Use this timer only when you really need a very interruptible timer; this checks its 'finished' condition up to 20 times a second, which can result in pretty high CPU usage", - "type": "class", - "file_path": "cherrypy\\cherrypy\\process\\plugins.py", - "ast_data": "ClassDef name:PerpetualTimer FunctionDef name:__init__ arguments arg:self vararg:args kwarg:kwargs Assign Call call:pop FunctionDef name:run arguments arg:self While If Call call:isSet Return return:no Try ExceptHandler If Raise" - }, - { - "library": "sphinx", - "name": "get_qualname_for", - "source_code": "def get_qualname_for(self, name: str) -> list[str] | None: if self.current_function: if self.current_classes and self.context[-1] = = '__init__': return [*self.context[: -1], name] else: return None else: return [*self.context, name]", - "docstring": "Get qualified name for given object as a list of string(s).", - "type": "method", - "file_path": "sphinx\\sphinx\\pycode\\parser.py", - "ast_data": "FunctionDef name:get_qualname_for arguments arg:self arg:name type:str If If BoolOp Compare op:Eq Return return:yes Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "ReadPreference", - "source_code": "class ReadPreference: PRIMARY = Primary() PRIMARY_PREFERRED = PrimaryPreferred() SECONDARY = Secondary() SECONDARY_PREFERRED = SecondaryPreferred() NEAREST = Nearest()", - "docstring": "An enum that defines some commonly used read preference modes. Apps can also create a custom read preference, for example:: Nearest(tag_sets=[{\"node\":\"analytics\"}]) See :doc: for code examples. A read preference is used in three cases: :class: connected to a single mongod: - `~pymongo.mongo_client.MongoClient~pymongo.errors.AutoReconnect~pymongo.errors.AutoReconnect~pymongo.mongo_client.MongoClient~pymongo.errors.OperationFailure~pymongo.errors.OperationFailure`: Read from any shard member.", - "type": "class", - "file_path": "mongo\\pymongo\\read_preferences.py", - "ast_data": "ClassDef name:ReadPreference Assign Call call:Primary Assign Call call:PrimaryPreferred Assign Call call:Secondary Assign Call call:SecondaryPreferred Assign Call call:Nearest" - }, - { - "library": "salmon", - "name": "clear", - "source_code": "def clear(self): with self.lock: self.states = shelve.open(self.database_path) super().clear() self.states.close()", - "docstring": "Primarily used in the debugging/unit testing process to make sure the states are clear. 
In production this could be a bad thing.", - "type": "method", - "file_path": "salmon\\salmon\\routing.py", - "ast_data": "FunctionDef name:clear arguments arg:self With Assign Call call:open" - }, - { - "library": "pytorch", - "name": "DisableBreakpoints", - "source_code": "class DisableBreakpoints: def __enter__(self) -> None: target = get_target() if target.DisableAllBreakpoints() is False: print('[-] error: failed to disable all breakpoints.') def __exit__(self, etype: Any, evalue: Any, tb: Any) -> None: target = get_target() if target.EnableAllBreakpoints() is False: print('[-] error: failed to enable all breakpoints.')", - "docstring": "Context-manager to temporarily disable all lldb breakpoints, useful if there is a risk to hit one during the evaluation of one of our custom commands", - "type": "class", - "file_path": "pytorch\\tools\\lldb\\pytorch_lldb.py", - "ast_data": "ClassDef name:DisableBreakpoints FunctionDef name:__enter__ arguments arg:self Assign Call call:get_target If Compare op:Is FunctionDef name:__exit__ arguments arg:self arg:etype type:Any arg:evalue type:Any arg:tb type:Any Assign Call call:get_target If Compare op:Is" - }, - { - "library": "tensorflow", - "name": "on_train_batch_begin", - "source_code": "@doc_controls.for_subclass_implementers @generic_utils.default def on_train_batch_begin(self, batch, logs = None): self.on_batch_begin(batch, logs = logs)", - "docstring": "Called at the beginning of a training batch in methods. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict, contains the return value of . Typically, the values of the 's metrics are returned. Example: .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "FunctionDef name:on_train_batch_begin arguments arg:self arg:batch arg:logs" - }, - { - "library": "cherrypy", - "name": "check_static_paths", - "source_code": "def check_static_paths(self): request = cherrypy.request for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue request.app = app for section in app.config: request.get_resource(section + '/dummy.html') conf = request.config.get if conf('tools.staticdir.on', False): msg = '' root = conf('tools.staticdir.root') dir = conf('tools.staticdir.dir') if dir is None: msg = 'tools.staticdir.dir is not set.' else: fulldir = '' if os.path.isabs(dir): fulldir = dir if root: msg = 'dir is an absolute path, even though a root is provided.' testdir = os.path.join(root, dir[1:]) if os.path.exists(testdir): msg + = '\\nIf you meant to serve the filesystem folder at %r, remove the leading slash from dir.' % (testdir,) elif not root: msg = 'dir is a relative path and no root provided.' else: fulldir = os.path.join(root, dir) if not os.path.isabs(fulldir): msg = '%r is not an absolute path.' % (fulldir,) if fulldir and (not os.path.exists(fulldir)): if msg: msg + = '\\n' msg + = '%r (root + dir) is not an existing filesystem path.' 
% fulldir if msg: warnings.warn('%s\\nsection: [%s]\\nroot: %r\\ndir: %r' % (msg, section, root, dir))", - "docstring": "Check Application config for incorrect static paths.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpchecker.py", - "ast_data": "FunctionDef name:check_static_paths arguments arg:self Assign For Call call:items If Assign For Assign If Call call:conf Assign Assign Call call:conf Assign Call call:conf If Compare op:Is Assign Assign If Call call:isabs Assign If Assign Assign Call call:join If Call call:exists If Assign Assign Call call:join If Assign If BoolOp If If" - }, - { - "library": "tensorflow", - "name": "get_input_at", - "source_code": "def get_input_at(self, node_index): return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input')", - "docstring": "Retrieves the input tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first input node of the layer. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", - "ast_data": "FunctionDef name:get_input_at arguments arg:self arg:node_index Return return:yes" - }, - { - "library": "numpy", - "name": "abspath", - "source_code": "def abspath(self, path): return DataSource.abspath(self, self._fullpath(path))", - "docstring": "Return absolute path of file in the Repository directory. If is an URL, then will return either the location the file exists locally or the location it would exist when opened using the method. Parameters ---------- path : str or pathlib.Path Can be a local file or a remote URL. This may, but does not have to, include the with which the was initialized. Returns ------- out : str Complete path, including the destination directory.", - "type": "method", - "file_path": "numpy\\numpy\\lib\\_datasource.py", - "ast_data": "FunctionDef name:abspath arguments arg:self arg:path Return return:yes" - }, - { - "library": "numpy", - "name": "__rtruediv__", - "source_code": "def __rtruediv__(self, other): return true_divide(other, self)", - "docstring": "Divide self into other, and return a new masked array.", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:__rtruediv__ arguments arg:self arg:other Return return:yes" - }, - { - "library": "django", - "name": "localtime", - "source_code": "def localtime(value = None, timezone = None): if value is None: value = now() if timezone is None: timezone = get_current_timezone() if is_naive(value): raise ValueError('localtime() cannot be applied to a naive datetime') return value.astimezone(timezone)", - "docstring": "Convert an aware datetime.datetime to local time. Only aware datetimes are allowed. When value is omitted, it defaults to now(). 
Local time is defined by the current time zone, unless another time zone is specified.", - "type": "function", - "file_path": "django\\django\\utils\\timezone.py", - "ast_data": "FunctionDef name:localtime arguments arg:value arg:timezone If Compare op:Is Assign Call call:now If Compare op:Is Assign Call call:get_current_timezone If Call call:is_naive Raise raises:ValueError('localtime() cannot be applied to a naive datetime') Return return:yes" - }, - { - "library": "pytorch", - "name": "Extension", - "source_code": "class Extension(abc.ABC): @staticmethod @abc.abstractmethod def registry_name() -> str: pass @staticmethod @abc.abstractmethod def from_descriptor(version: str) -> 'Extension': pass @abc.abstractmethod def get_descriptor(self) -> str: pass", - "docstring": "Extensions provide modular additions to functionality within distributed checkpointing, which affect the layout or format of the written artifacts. Extensions may be built into pytorch, or provided externally. When writing, the caller provides a list of extension instances of the appropriate type. Each extension can output a descriptor which is used to reconstitute the extension at read-time.", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\_extension.py", - "ast_data": "ClassDef name:Extension FunctionDef name:registry_name arguments FunctionDef name:from_descriptor arguments arg:version type:str FunctionDef name:get_descriptor arguments arg:self" - }, - { - "library": "django", - "name": "validate_thread_sharing", - "source_code": "def validate_thread_sharing(self): if not (self.allow_thread_sharing or self._thread_ident = = _thread.get_ident()): raise DatabaseError(\"DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias '%s' was created in thread id %s and this is thread id %s.\" % (self.alias, self._thread_ident, _thread.get_ident()))", - "docstring": "Validate that the connection isn't accessed by another thread than the one which originally created it, unless the connection was explicitly authorized to be shared between threads (via the method). Raise an exception if the validation fails.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:validate_thread_sharing arguments arg:self If Raise raises:DatabaseError(\"DatabaseWrapper objects created in a thread can only be used in that same thread. 
The object with alias '%s' was created in thread id %s and this is thread id %s.\" % (self.alias, self._thread_ident, _thread.get_ident()))" - }, - { - "library": "pytorch", - "name": "scoped_copy", - "source_code": "def scoped_copy(self) -> Self: new_cse = self.clone() new_cse._cache = ScopedDict(self._cache) new_cse.reduction_cache = ScopedDict(self.reduction_cache) new_cse.store_cache = ScopedDict(self.store_cache) return new_cse", - "docstring": "Return a copy of using ScopedDict so changes to *_cache aren't visible in self", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\common.py", - "ast_data": "FunctionDef name:scoped_copy arguments arg:self Assign Call call:clone Assign Call call:ScopedDict Assign Call call:ScopedDict Assign Call call:ScopedDict Return return:yes" - }, - { - "library": "pytorch", - "name": "set_tensor", - "source_code": "def set_tensor(self, name: str, value: torch.Tensor) -> None: prefix, _, attr = name.rpartition('.') set_tensor(self.get_submodule(prefix), attr, value)", - "docstring": "Set the attribute specified by the given path to value. For example, to set the attribute mod.layer1.conv1.weight, use accessor.set_tensor(\"layer1.conv1.weight\", value)", - "type": "method", - "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py", - "ast_data": "FunctionDef name:set_tensor arguments arg:self arg:name type:str arg:value type:torch.Tensor Assign Call call:rpartition" - }, - { - "library": "scikit-learn", - "name": "inverse_transform", - "source_code": "def inverse_transform(self, X): check_is_fitted(self) X = check_array(X) return X @ self.components_ + self.mean_", - "docstring": "Transform data from the latent space to the original space. This inversion is an approximation due to the loss of information induced by the forward decomposition. .. versionadded:: 1.2 Parameters ---------- X : ndarray of shape (n_samples, n_components) Data in the latent space. Returns ------- X_original : ndarray of shape (n_samples, n_features) Reconstructed data in the original space.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_sparse_pca.py", - "ast_data": "FunctionDef name:inverse_transform arguments arg:self arg:X Assign Call call:check_array Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_python_graph", - "source_code": "def set_python_graph(self, python_graph): self._python_graph = python_graph self._node_traceback = {} if self._python_graph: for op in self._python_graph.get_operations(): self._node_traceback[op.name] = tuple(map(tuple, op.traceback))", - "docstring": "Provide Python object to the wrapper. Unlike the partition graphs, which are protobuf objects, is a Python object and carries additional information such as the traceback of the construction of the nodes in the graph. Args: python_graph: (ops.Graph) The Python Graph object.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", - "ast_data": "FunctionDef name:set_python_graph arguments arg:self arg:python_graph Assign Assign If For Call call:get_operations Assign Call call:tuple" - }, - { - "library": "tensorflow", - "name": "get_compiler_ir", - "source_code": "def get_compiler_ir(self, device_name, platform_name, function_name, flat_args, captured_inputs, stage = 'hlo'): return pywrap_tfe.TF_GetCompilerIr(self._context_handle, function_name, stage, device_name, flat_args, captured_inputs, platform_name)", - "docstring": "Get the compiler IR bytes. 
Args: device_name: The name of the device with the form as \"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc. When this is used, actual device is needed for getting the compiler IR. platform_name: The name of the platform, e.g. \"TPU\". When this is used, first we find a device whose name contains the platform, if it is found we get the compiler IR by device. Otherwise the compiler IR is obtained as if using that device. The former logic of falling back to device is necessary, as there are cases of TF variables that need to access devices, but the upper layer may generally choose platform for getting compiler IR in a device-agnostic way. function_name: The name of the function to get the compiler IR. flat_args: The flat argument inputs. captured_inputs: The inputs that are captured. stage: The exported stage for the given function. Returns: The compiler IR bytes.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:get_compiler_ir arguments arg:self arg:device_name arg:platform_name arg:function_name arg:flat_args arg:captured_inputs arg:stage Return return:yes" - }, - { - "library": "django", - "name": "Concat", - "source_code": "class Concat(Func): function = None template = '%(expressions)s' def __init__(self, *expressions, **extra): if len(expressions) < 2: raise ValueError('Concat must take at least two expressions') paired = self._paired(expressions, output_field = extra.get('output_field')) super().__init__(paired, **extra) def _paired(self, expressions, output_field): if len(expressions) = = 2: return ConcatPair(*expressions, output_field = output_field) return ConcatPair(expressions[0], self._paired(expressions[1:], output_field = output_field), output_field = output_field)", - "docstring": "Concatenate text fields together. Backends that result in an entire null expression when any arguments are null will wrap each argument in coalesce functions to ensure a non-null result.", - "type": "class", - "file_path": "django\\django\\db\\models\\functions\\text.py", - "ast_data": "ClassDef name:Concat Assign Assign FunctionDef name:__init__ arguments arg:self vararg:expressions kwarg:extra If Compare op:Lt Raise raises:ValueError('Concat must take at least two expressions') Assign Call call:_paired FunctionDef name:_paired arguments arg:self arg:expressions arg:output_field If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict_proba", - "source_code": "@available_if(_estimator_has('predict_proba', delegates = ('final_estimator_', 'final_estimator'))) def predict_proba(self, X): check_is_fitted(self) y_pred = self.final_estimator_.predict_proba(self.transform(X)) if isinstance(self._label_encoder, list): y_pred = np.array([preds[:, 0] for preds in y_pred]).T return y_pred", - "docstring": "Predict class probabilities for using the final estimator. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vectors, where is the number of samples and is the number of features. 
Returns ------- probabilities : ndarray of shape (n_samples, n_classes) or list of ndarray of shape (n_output,) The class probabilities of the input samples.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_stacking.py", - "ast_data": "FunctionDef name:predict_proba arguments arg:self arg:X Call call:available_if Assign Call call:predict_proba If Call call:isinstance Assign Return return:yes" - }, - { - "library": "virtualenv", - "name": "__init__", - "source_code": "def __init__(self, options, enabled) -> None: self.enabled = enabled self.env = options.env", - "docstring": "Create. :param options: the parsed options as defined within :meth: :param enabled: a flag weather the seeder is enabled or not", - "type": "method", - "file_path": "virtualenv\\src\\virtualenv\\seed\\seeder.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:options arg:enabled Assign Assign" - }, - { - "library": "tensorflow", - "name": "greater_equal", - "source_code": "def greater_equal(a, b): return _maybe_static(a) > = _maybe_static(b)", - "docstring": "A version of tf.greater_equal that eagerly evaluates if possible.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_utils.py", - "ast_data": "FunctionDef name:greater_equal arguments arg:a arg:b Return return:yes" - }, - { - "library": "numpy", - "name": "feature_detect", - "source_code": "def feature_detect(self, names): names = self.feature_get_til(names, 'implies_detect') detect = [] for n in names: d = self.feature_supported[n] detect + = d.get('detect', d.get('group', [n])) return detect", - "docstring": "Return a list of CPU features that required to be detected sorted from the lowest to highest interest.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", - "ast_data": "FunctionDef name:feature_detect arguments arg:self arg:names Assign Call call:feature_get_til Assign For Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_float32_matmul_precision", - "source_code": "def get_float32_matmul_precision() -> str: return _C._get_float32_matmul_precision()", - "docstring": "Returns the current value of float32 matrix multiplication precision. Refer to :func: documentation for more details.", - "type": "function", - "file_path": "pytorch\\torch\\__init__.py", - "ast_data": "FunctionDef name:get_float32_matmul_precision arguments Return return:yes" - }, - { - "library": "tensorflow", - "name": "trace", - "source_code": "def trace(self, graph_element_name): self._depth_count + = 1 node_name = get_node_name(graph_element_name) if node_name = = self._destination_node_name: raise GraphTracingReachedDestination() if node_name in self._skip_node_names: return if node_name in self._visited_nodes: return self._visited_nodes.append(node_name) for input_list in self._input_lists: if node_name not in input_list: continue for inp in input_list[node_name]: if get_node_name(inp) in self._visited_nodes: continue self._inputs.append(inp) self._depth_list.append(self._depth_count) self.trace(inp) self._depth_count - = 1", - "docstring": "Trace inputs. Args: graph_element_name: Name of the node or an output tensor of the node, as a str. 
Raises: GraphTracingReachedDestination: if destination_node_name of this tracer object is not None and the specified node is reached.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py", - "ast_data": "FunctionDef name:trace arguments arg:self arg:graph_element_name Assign Call call:get_node_name If Compare op:Eq Raise raises:GraphTracingReachedDestination() If Compare op:In Return return:no If Compare op:In Return return:no For If Compare op:NotIn For If Compare op:In" - }, - { - "library": "matplotlib", - "name": "mutatedx", - "source_code": "def mutatedx(self): return self._points[0, 0] ! = self._points_orig[0, 0] or self._points[1, 0] ! = self._points_orig[1, 0]", - "docstring": "Return whether the x-limits have changed since init.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:mutatedx arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "get_distance", - "source_code": "def get_distance(self, f, dist_val, lookup_type): value = dist_val[0] geodetic = f.geodetic(self.connection) geography = f.geography if isinstance(value, Distance): if geography: dist_param = value.m elif geodetic: if lookup_type = = 'dwithin': raise ValueError('Only numeric values of degree units are allowed on geographic DWithin queries.') dist_param = value.m else: dist_param = getattr(value, Distance.unit_attname(f.units_name(self.connection))) else: dist_param = value return [dist_param]", - "docstring": "Retrieve the distance parameters for the given geometry field, distance lookup value, and the distance lookup type. This is the most complex implementation of the spatial backends due to what is supported on geodetic geometry columns vs. what's available on projected geometry columns. In addition, it has to take into account the geography column type.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py", - "ast_data": "FunctionDef name:get_distance arguments arg:self arg:f arg:dist_val arg:lookup_type Assign Assign Call call:geodetic Assign If Call call:isinstance If Assign If If Compare op:Eq Raise raises:ValueError('Only numeric values of degree units are allowed on geographic DWithin queries.') Assign Assign Call call:getattr Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "name", - "source_code": "@property def name(self): return self._name", - "docstring": "Returns the (non-unique, optional) name of this symbolic Keras value.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", - "ast_data": "FunctionDef name:name arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "broadcast", - "source_code": "@_exception_logger def broadcast(tensor: torch.Tensor, src: Optional[int] = None, group: Optional[ProcessGroup] = None, async_op: bool = False, group_src: Optional[int] = None): group = _group_or_default_group(group) group_src = _canonicalize_group_rank(group, src, group_src, return_global = False) _check_single_tensor(tensor, 'tensor') if _rank_not_in_group(group): _warn_not_in_group('broadcast') return opts = BroadcastOptions() opts.rootRank = group_src opts.rootTensor = 0 opts.asyncOp = async_op work = group.broadcast([tensor], opts) if async_op: return work elif work is not None: work.wait()", - "docstring": "Broadcasts the tensor to the whole group. `` but not both. 
Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:broadcast arguments arg:tensor type:torch.Tensor arg:src type:Optional[int] arg:group type:Optional[ProcessGroup] arg:async_op type:bool arg:group_src type:Optional[int] Assign Call call:_group_or_default_group Assign Call call:_canonicalize_group_rank If Call call:_rank_not_in_group Return return:no Assign Call call:BroadcastOptions Assign Assign Assign Assign Call call:broadcast If Return return:yes If Compare op:IsNot" - }, - { - "library": "tensorflow", - "name": "max", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def max(x, axis = None, keepdims = False): return math_ops.reduce_max(x, axis, keepdims)", - "docstring": "Maximum value in a tensor. Args: x: A tensor or variable. axis: An integer, the axis to find maximum values. keepdims: A boolean, whether to keep the dimensions or not. If is , the rank of the tensor is reduced by 1. If is , the reduced dimension is retained with length 1. Returns: A tensor with maximum values of .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:max arguments arg:x arg:axis arg:keepdims Return return:yes" - }, - { - "library": "pandas", - "name": "is_re_compilable", - "source_code": "def is_re_compilable(obj: object) -> bool: try: re.compile(obj) except TypeError: return False else: return True", - "docstring": "Check if the object can be compiled into a regex pattern instance. Parameters ---------- obj : The object to check The object to check if the object can be compiled into a regex pattern instance. Returns ------- bool Whether can be compiled as a regex pattern. See Also -------- api.types.is_re : Check if the object is a regex pattern instance. Examples -------- >>> from pandas.api.types import is_re_compilable >>> is_re_compilable(\".*\") True >>> is_re_compilable(1) False", - "type": "function", - "file_path": "pandas\\pandas\\core\\dtypes\\inference.py", - "ast_data": "FunctionDef name:is_re_compilable arguments arg:obj type:object Try ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "integ", - "source_code": "def integ(self, m = 1, k = [], lbnd = None): off, scl = self.mapparms() if lbnd is None: lbnd = 0 else: lbnd = off + scl * lbnd coef = self._int(self.coef, m, k, lbnd, 1.0 / scl) return self.__class__(coef, self.domain, self.window, self.symbol)", - "docstring": "Integrate. Return a series instance that is the definite integral of the current series. Parameters ---------- m : non-negative int The number of integrations to perform. k : array_like Integration constants. The first constant is applied to the first integration, the second to the second, and so on. The list of values must less than or equal to in length and any missing values are set to zero. lbnd : Scalar The lower bound of the definite integral. Returns ------- new_series : series A new series representing the integral. 
The domain is the same as the domain of the integrated series.", - "type": "method", - "file_path": "numpy\\numpy\\polynomial\\_polybase.py", - "ast_data": "FunctionDef name:integ arguments arg:self arg:m arg:k arg:lbnd Assign Call call:mapparms If Compare op:Is Assign Assign Assign Call call:_int Return return:yes" - }, - { - "library": "django", - "name": "escape", - "source_code": "@keep_lazy(SafeString) def escape(text): return SafeString(html.escape(str(text)))", - "docstring": "Return the given text with ampersands, quotes and angle brackets encoded for use in HTML. Always escape input, even if it's already escaped and marked as such. This may result in double-escaping. If this is a concern, use conditional_escape() instead.", - "type": "function", - "file_path": "django\\django\\utils\\html.py", - "ast_data": "FunctionDef name:escape arguments arg:text Call call:keep_lazy Return return:yes" - }, - { - "library": "kornia", - "name": "BlobDoG", - "source_code": "class BlobDoG(Module): def __init__(self) -> None: super().__init__() def __repr__(self) -> str: return self.__class__.__name__ def forward(self, input: Tensor, sigmas: Optional[Tensor] = None) -> Tensor: return dog_response(input)", - "docstring": "Module that calculates Difference-of-Gaussians blobs. See :func: for details.", - "type": "class", - "file_path": "kornia\\kornia\\feature\\responses.py", - "ast_data": "ClassDef name:BlobDoG FunctionDef name:__init__ arguments arg:self FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:input type:Tensor arg:sigmas type:Optional[Tensor] Return return:yes" - }, - { - "library": "pandas", - "name": "rewrite_warning", - "source_code": "@contextlib.contextmanager def rewrite_warning(target_message: str, target_category: type[Warning], new_message: str, new_category: type[Warning] | None = None) -> Generator[None]: if new_category is None: new_category = target_category with warnings.catch_warnings(record = True) as record: yield if len(record) > 0: match = re.compile(target_message) for warning in record: if warning.category is target_category and re.search(match, str(warning.message)): category = new_category message: Warning | str = new_message else: category, message = (warning.category, warning.message) warnings.warn_explicit(message = message, category = category, filename = warning.filename, lineno = warning.lineno)", - "docstring": "Rewrite the message of a warning. Parameters ---------- target_message : str Warning message to match. target_category : Warning Warning type to match. new_message : str New warning message to emit. new_category : Warning or None, default None New warning type to emit. 
When None, will be the same as target_category.", - "type": "function", - "file_path": "pandas\\pandas\\util\\_exceptions.py", - "ast_data": "FunctionDef name:rewrite_warning arguments arg:target_message type:str arg:target_category type:type[Warning] arg:new_message type:str arg:new_category type:type[Warning] | None If Compare op:Is Assign With If Compare op:Gt Assign Call call:compile For If BoolOp Compare op:Is Call call:search Assign Assign" - }, - { - "library": "pytorch", - "name": "trunc_normal_", - "source_code": "def trunc_normal_(tensor: Tensor, mean: float = 0.0, std: float = 1.0, a: float = -2.0, b: float = 2.0, generator: _Optional[torch.Generator] = None) -> Tensor: return _no_grad_trunc_normal_(tensor, mean, std, a, b, generator = generator)", - "docstring": "Fill the input Tensor with values drawn from a truncated normal distribution. The values are effectively drawn from the normal distribution :math: with values outside :math: redrawn until they are within the bounds. The method used for generating the random values works best when :math:. Args: tensor: an n-dimensional mean: the mean of the normal distribution std: the standard deviation of the normal distribution a: the minimum cutoff value b: the maximum cutoff value generator: the torch Generator to sample from (default: None) Examples: >>> w = torch.empty(3, 5) >>> nn.init.trunc_normal_(w)", - "type": "function", - "file_path": "pytorch\\torch\\nn\\init.py", - "ast_data": "FunctionDef name:trunc_normal_ arguments arg:tensor type:Tensor arg:mean type:float arg:std type:float arg:a type:float arg:b type:float arg:generator type:_Optional[torch.Generator] Return return:yes" - }, - { - "library": "pytorch", - "name": "is_target_div_by_dim", - "source_code": "def is_target_div_by_dim(target: list[int], dim: list[DVar]): return BinConstraintD(BinConstraintD(Prod(target), dim, op_mod), 0, op_eq)", - "docstring": "Generate constraints to check if the target dimensions are divisible by the input dimensions Args: target: Target dimensions dim: Input dimensions Returns: Constraints to check divisibility", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:is_target_div_by_dim arguments arg:target type:list[int] arg:dim type:list[DVar] Return return:yes" - }, - { - "library": "tensorflow", - "name": "take_while", - "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.take_while(...)') @tf_export('data.experimental.take_while') def take_while(predicate): def _apply_fn(dataset): return dataset.take_while(predicate = predicate) return _apply_fn", - "docstring": "A transformation that stops dataset iteration based on a . Args: predicate: A function that maps a nested structure of tensors (having shapes and types defined by and ) to a scalar tensor. Returns: A transformation function, which can be passed to .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\take_while_ops.py", - "ast_data": "FunctionDef name:take_while arguments arg:predicate Call call:deprecated Call call:tf_export FunctionDef name:_apply_fn arguments arg:dataset Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "elapsed_time", - "source_code": "def elapsed_time(self, end_event): return super().elapsed_time(end_event)", - "docstring": "Return the time elapsed. 
Time reported in milliseconds after the event was recorded and before the end_event was recorded.", - "type": "method", - "file_path": "pytorch\\torch\\xpu\\streams.py", - "ast_data": "FunctionDef name:elapsed_time arguments arg:self arg:end_event Return return:yes" - }, - { - "library": "django", - "name": "expected_parameters", - "source_code": "def expected_parameters(self): raise NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')", - "docstring": "Return the list of parameter names that are expected from the request's query string and that will be used by this filter.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\filters.py", - "ast_data": "FunctionDef name:expected_parameters arguments arg:self Raise raises:NotImplementedError('subclasses of ListFilter must provide an expected_parameters() method')" - }, - { - "library": "pytorch", - "name": "ResolvedExportOptions", - "source_code": "@deprecated('torch.onnx.dynamo_export is deprecated since 2.7.0. Please use torch.onnx.export(..., dynamo = True) instead.', category = None) class ResolvedExportOptions(ExportOptions): def __init__(self): from torch.onnx._internal.fx import dynamo_graph_extractor, onnxfunction_dispatcher self.dynamic_shapes: bool = True self.fx_tracer: dynamo_graph_extractor.DynamoExport = dynamo_graph_extractor.DynamoExport() self.fake_context = None self.onnx_registry: OnnxRegistry = OnnxRegistry() self.decomposition_table = decomposition_table.create_onnx_friendly_decomposition_table(self.onnx_registry) self.onnxfunction_dispatcher = onnxfunction_dispatcher.OnnxFunctionDispatcher(self.onnx_registry)", - "docstring": "Consolidates :class: with default values. All unspecified options from :class: are assigned a default value. This is an internal class and its API may be changed at any time without notice.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", - "ast_data": "ClassDef name:ResolvedExportOptions Call call:deprecated FunctionDef name:__init__ arguments arg:self Assign Assign Call call:create_onnx_friendly_decomposition_table Assign Call call:OnnxFunctionDispatcher" - }, - { - "library": "kornia", - "name": "RgbToRgb255", - "source_code": "class RgbToRgb255(Module): def forward(self, image: Tensor) -> Tensor: return rgb_to_rgb255(image)", - "docstring": "Convert an image from RGB to RGB [0, 255] for visualization purposes. Returns: RGB version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> rgb = RgbToRgb255() >>> output = rgb(input) # 2x3x4x5", - "type": "class", - "file_path": "kornia\\kornia\\color\\rgb.py", - "ast_data": "ClassDef name:RgbToRgb255 FunctionDef name:forward arguments arg:self arg:image type:Tensor Return return:yes" - }, - { - "library": "authlib", - "name": "get_rsa_public_key", - "source_code": "def get_rsa_public_key(self): raise NotImplementedError()", - "docstring": "A method to get the RSA public key for RSA-SHA1 signature method. 
For instance, the value is saved on column ``:: def get_rsa_public_key(self): return self.rsa_public_key", - "type": "method", - "file_path": "authlib\\authlib\\oauth1\\rfc5849\\models.py", - "ast_data": "FunctionDef name:get_rsa_public_key arguments arg:self Raise raises:NotImplementedError()" - }, - { - "library": "pytorch", - "name": "get_strides_of_load", - "source_code": "def get_strides_of_load(self, index: sympy.Expr) -> dict[sympy.Symbol, sympy.Expr]: index_to_tile_indexes = {k: v.expr for k, v in self.range_tree_nodes.items()} index_in_tile_vars = sympy_subs(index, index_to_tile_indexes) strides = {} for range_tree in self.range_trees: s = sympy_index_symbol(range_tree.name) strides[s] = sympy_subs(index_in_tile_vars, {s: 1}) - sympy_subs(index_in_tile_vars, {s: 0}) return strides", - "docstring": "This gets the stride of the index for each of the tiling variables (technically, it does it at index 0) For example, if xindex = x0 + 512*x1 + 1024*r0 x0 = (xindex//512) x1 = (xindex % 512) r0 = rindex // 1024 this function would return {xindex: 512, rindex: 1024}", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\simd.py", - "ast_data": "FunctionDef name:get_strides_of_load arguments arg:self arg:index type:sympy.Expr Assign Assign Call call:sympy_subs Assign For Assign Call call:sympy_index_symbol Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "def __call__(self, shape, dtype = dtypes.float32, **kwargs): self._validate_kwargs(kwargs, support_partition = False) dtype = _assert_float_dtype(dtype) if len(shape) < 2: raise ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape = {shape}') num_rows = 1 for dim in shape[: -1]: num_rows * = dim num_cols = shape[-1] flat_shape = (max(num_cols, num_rows), min(num_cols, num_rows)) a = self._random_generator.random_normal(flat_shape, dtype = dtype) q, r = gen_linalg_ops.qr(a, full_matrices = False) d = array_ops.diag_part(r) q * = math_ops.sign(d) if num_rows < num_cols: q = array_ops.matrix_transpose(q) return self.gain * array_ops.reshape(q, shape)", - "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. Only floating point types are supported. **kwargs: Additional keyword arguments. Raises: ValueError: If the dtype is not floating point or the input shape is not valid.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:shape arg:dtype kwarg:kwargs Assign Call call:_assert_float_dtype If Compare op:Lt Raise raises:ValueError(f'The tensor to initialize, specified by argument `shape` must be at least two-dimensional. Received shape={shape}') Assign For Assign Assign Assign Call call:random_normal Assign Call call:qr Assign Call call:diag_part If Compare op:Lt Assign Call call:matrix_transpose Return return:yes" - }, - { - "library": "tensorflow", - "name": "global_norm", - "source_code": "@tf_export('linalg.global_norm', v1 = ['linalg.global_norm', 'global_norm']) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints('global_norm') def global_norm(t_list, name = None): if not isinstance(t_list, collections_abc.Sequence) or isinstance(t_list, str): raise TypeError(f'`t_list` should be a sequence of tensors. 
Received {type(t_list)}.') t_list = list(t_list) with ops.name_scope(name, 'global_norm', t_list) as name: values = [ops.convert_to_tensor(t.values if isinstance(t, indexed_slices.IndexedSlices) else t, name = 't_%d' % i) if t is not None else t for i, t in enumerate(t_list)] half_squared_norms = [] for v in values: if v is not None: with ops.colocate_with(v): half_squared_norms.append(gen_nn_ops.l2_loss(v)) half_squared_norm = math_ops.reduce_sum(array_ops_stack.stack(half_squared_norms)) norm = math_ops.sqrt(half_squared_norm * constant_op.constant(2.0, dtype = half_squared_norm.dtype), name = 'global_norm') return norm", - "docstring": "Computes the global norm of multiple tensors. Given a tuple or list of tensors , this operation returns the global norm of the elements in all tensors in . The global norm is computed as: Any entries in that are of type None are ignored. Args: t_list: A tuple or list of mixed , , or None. name: A name for the operation (optional). Returns: A 0-D (scalar) of type . Raises: TypeError: If is not a sequence.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\clip_ops.py", - "ast_data": "FunctionDef name:global_norm arguments arg:t_list arg:name Call call:tf_export Call call:deprecated_endpoints If BoolOp Call call:isinstance Raise raises:TypeError(f'`t_list` should be a sequence of tensors. Received {type(t_list)}.') Assign Call call:list With Assign Assign For If Compare op:IsNot With Assign Call call:reduce_sum Assign Call call:sqrt Return return:yes" - }, - { - "library": "scipy", - "name": "backtracking", - "source_code": "def backtracking(A, g, x, p, theta, p_dot_g, lb, ub): alpha = 1 while True: x_new, _ = reflective_transformation(x + alpha * p, lb, ub) step = x_new - x cost_change = -evaluate_quadratic(A, g, step) if cost_change > -0.1 * alpha * p_dot_g: break alpha * = 0.5 active = find_active_constraints(x_new, lb, ub) if np.any(active ! = 0): x_new, _ = reflective_transformation(x + theta * alpha * p, lb, ub) x_new = make_strictly_feasible(x_new, lb, ub, rstep = 0) step = x_new - x cost_change = -evaluate_quadratic(A, g, step) return (x, step, cost_change)", - "docstring": "Find an appropriate step size using backtracking line search.", - "type": "function", - "file_path": "scipy\\scipy\\optimize\\_lsq\\trf_linear.py", - "ast_data": "FunctionDef name:backtracking arguments arg:A arg:g arg:x arg:p arg:theta arg:p_dot_g arg:lb arg:ub Assign While Assign Call call:reflective_transformation Assign Assign If Compare op:Gt Assign Call call:find_active_constraints If Call call:any Assign Call call:reflective_transformation Assign Call call:make_strictly_feasible Assign Assign Return return:yes" - }, - { - "library": "scipy", - "name": "from_power_basis", - "source_code": "@classmethod def from_power_basis(cls, pp, extrapolate = None): if not isinstance(pp, PPoly): raise TypeError(f'.from_power_basis only accepts PPoly instances. Got {type(pp)} instead.') dx = np.diff(pp.x) k = pp.c.shape[0] - 1 rest = (None,) * (pp.c.ndim - 2) c = np.zeros_like(pp.c) for a in range(k + 1): factor = pp.c[a] / comb(k, k - a) * dx[(slice(None),) + rest] ** (k - a) for j in range(k - a, k + 1): c[j] + = factor * comb(j, k - a) if extrapolate is None: extrapolate = pp.extrapolate return cls.construct_fast(c, pp.x, extrapolate, pp.axis)", - "docstring": "Construct a piecewise polynomial in Bernstein basis from a power basis polynomial. 
Parameters ---------- pp : PPoly A piecewise polynomial in the power basis extrapolate : bool or 'periodic', optional If bool, determines whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. Default is True.", - "type": "method", - "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", - "ast_data": "FunctionDef name:from_power_basis arguments arg:cls arg:pp arg:extrapolate If Raise raises:TypeError(f'.from_power_basis only accepts PPoly instances. Got {type(pp)} instead.') Assign Call call:diff Assign Assign Assign Call call:zeros_like For Call call:range Assign For Call call:range If Compare op:Is Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "to_map", - "source_code": "def to_map(val_or_map: Union[Std, dict[int, Std]], local_world_size: int) -> dict[int, Std]: if isinstance(val_or_map, Std): return dict.fromkeys(range(local_world_size), val_or_map) else: map = {} for i in range(local_world_size): map[i] = val_or_map.get(i, Std.NONE) return map", - "docstring": "Certain APIs take redirect settings either as a single value (e.g. apply to all local ranks) or as an explicit user-provided mapping. This method is a convenience method that converts a value or mapping into a mapping. Example: :: to_map(Std.OUT, local_world_size=2) # returns: {0: Std.OUT, 1: Std.OUT} to_map({1: Std.OUT}, local_world_size=2) # returns: {0: Std.NONE, 1: Std.OUT} to_map( {0: Std.OUT, 1: Std.OUT}, local_world_size=2 ) # returns: {0: Std.OUT, 1: Std.OUT}", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\api.py", - "ast_data": "FunctionDef name:to_map arguments arg:val_or_map type:Union[Std, dict[int, Std]] arg:local_world_size type:int If Call call:isinstance Return return:yes Assign For Call call:range Assign Call call:get Return return:yes" - }, - { - "library": "scipy", - "name": "tol", - "source_code": "@property def tol(self): return self._tol", - "docstring": "positive float: The desired relative tolerance of calculations. Left unspecified, calculations may be faster; when provided, calculations may be more likely to meet the desired accuracy.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", - "ast_data": "FunctionDef name:tol arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, filename, key_column_index = TextFileIndex.LINE_NUMBER, value_column_index = TextFileIndex.WHOLE_LINE, vocab_size = None, delimiter = '\\t', name = 'text_file_string_table_init'): super(TextFileStringTableInitializer, self).__init__(filename, dtypes.int64, key_column_index, dtypes.string, value_column_index, vocab_size = vocab_size, delimiter = delimiter, name = name)", - "docstring": "Constructs an initializer for an id-to-string table from a text file. It populates a table that its key and value types are int64 and string, respectively. It generates one key-value pair per line. The content of the key and value are specified by and . - TextFileIndex.LINE_NUMBER means use the line number starting from zero, expects data type int64. - TextFileIndex.WHOLE_LINE means use the whole line content, expects data type string or int64. - A value >=0 means use the index (starting at zero) of the split line based on . Args: filename: The filename of the text file to be used for initialization. 
The path must be accessible from wherever the graph is initialized (eg. trainer or eval workers). The filename may be a scalar . key_column_index: The column index from the text file to get the keys from. The default is to use the line number, starting from zero. value_column_index: The column index from the text file to get the values from. The default is to use the whole line content. vocab_size: The number of elements in the file, if known. delimiter: The delimiter to separate fields in a line. name: Optional name for the op. Raises: TypeError: when the filename is empty, or when the table key and value data types do not match the expected data types.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:filename arg:key_column_index arg:value_column_index arg:vocab_size arg:delimiter arg:name" - }, - { - "library": "tensorflow", - "name": "read_model_from_bytearray", - "source_code": "def read_model_from_bytearray(model_bytearray): model = convert_bytearray_to_object(model_bytearray) if sys.byteorder = = 'big': byte_swap_tflite_model_obj(model, 'little', 'big') for buffer in model.buffers: if buffer.offset: buffer.data = model_bytearray[buffer.offset: buffer.offset + buffer.size] buffer.offset = 0 buffer.size = 0 for subgraph in model.subgraphs: for op in subgraph.operators: if op.largeCustomOptionsOffset: op.customOptions = model_bytearray[op.largeCustomOptionsOffset: op.largeCustomOptionsOffset + op.largeCustomOptionsSize] op.largeCustomOptionsOffset = 0 op.largeCustomOptionsSize = 0 return model", - "docstring": "Reads a tflite model as a python object. Args: model_bytearray: TFLite model in bytearray format. Returns: A python object corresponding to the input tflite file.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py", - "ast_data": "FunctionDef name:read_model_from_bytearray arguments arg:model_bytearray Assign Call call:convert_bytearray_to_object If Compare op:Eq For If Assign Assign Assign For For If Assign Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "bad_request", - "source_code": "@requires_csrf_token def bad_request(request, exception, template_name = ERROR_400_TEMPLATE_NAME): try: template = loader.get_template(template_name) body = template.render(request = request) except TemplateDoesNotExist: if template_name ! = ERROR_400_TEMPLATE_NAME: raise return HttpResponseBadRequest(ERROR_PAGE_TEMPLATE % {'title': 'Bad Request (400)', 'details': ''}) return HttpResponseBadRequest(body)", - "docstring": "400 error handler. Templates: :template: Context: None", - "type": "function", - "file_path": "django\\django\\views\\defaults.py", - "ast_data": "FunctionDef name:bad_request arguments arg:request arg:exception arg:template_name Try Assign Call call:get_template Assign Call call:render ExceptHandler If Compare op:NotEq Raise Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "pdf", - "source_code": "def pdf(self, x, mean = None, cov = 1, allow_singular = False): params = self._process_parameters(mean, cov, allow_singular) dim, mean, cov_object = params x = self._process_quantiles(x, dim) out = np.exp(self._logpdf(x, mean, cov_object)) if np.any(cov_object.rank < dim): out_of_bounds = ~cov_object._support_mask(x - mean) out[out_of_bounds] = 0.0 return _squeeze_output(out)", - "docstring": "Multivariate normal probability density function. 
Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. %(_mvn_doc_default_callparams)s Returns ------- pdf : ndarray or scalar Probability density function evaluated at Notes ----- %(_mvn_doc_callparams_note)s", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:pdf arguments arg:self arg:x arg:mean arg:cov arg:allow_singular Assign Call call:_process_parameters Assign Assign Call call:_process_quantiles Assign Call call:exp If Call call:any Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "modify_model_interface", - "source_code": "def modify_model_interface(input_file, output_file, input_type, output_type): input_type_int = _parse_type_to_int(input_type, 'input_type') output_type_int = _parse_type_to_int(output_type, 'output_type') status = _pywrap_modify_model_interface.modify_model_interface(input_file, output_file, input_type_int, output_type_int) if status ! = 0: raise RuntimeError('Error occurred when trying to modify the model input type from float to {input_type} and output type from float to {output_type}.'.format(input_type = input_type, output_type = output_type))", - "docstring": "Modify a quantized model's interface (input/output) from float to integer. Args: input_file: Full path name to the input tflite file. output_file: Full path name to the output tflite file. input_type: Final input interface type. output_type: Final output interface type. Raises: RuntimeError: If the modification of the model interface was unsuccessful. ValueError: If the input_type or output_type is unsupported.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\tools\\optimize\\python\\modify_model_interface_lib.py", - "ast_data": "FunctionDef name:modify_model_interface arguments arg:input_file arg:output_file arg:input_type arg:output_type Assign Call call:_parse_type_to_int Assign Call call:_parse_type_to_int Assign Call call:modify_model_interface If Compare op:NotEq Raise raises:RuntimeError('Error occurred when trying to modify the model input type from float to {input_type} and output type from float to {output_type}.'.format(input_type=input_type, output_type=output_type))" - }, - { - "library": "scipy", - "name": "Sphere", - "source_code": "class Sphere(Benchmark): change_dimensionality = True def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N)) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return sum(x ** 2)", - "docstring": "Sphere objective function. This class defines the Sphere [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Sphere}}(x) = \\sum_{i=1}^{n} x_i^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. 
TODO Jamil has stupid limits", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", - "ast_data": "ClassDef name:Sphere Assign FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_verts", - "source_code": "def get_verts(self): trans = self.get_transform() path = self.get_path() polygons = path.to_polygons(trans) if len(polygons): return polygons[0] return []", - "docstring": "Return a copy of the vertices used in this patch. If the patch contains Bézier curves, the curves will be interpolated by line segments. To access the curves as curves, use .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:get_verts arguments arg:self Assign Call call:get_transform Assign Call call:get_path Assign Call call:to_polygons If Call call:len Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "is_output_of_multi_outputs_template", - "source_code": "def is_output_of_multi_outputs_template(input_buf: Optional[Union[Buffer, Operation]]) -> bool: from . import ir return isinstance(input_buf, ir.MultiOutput) and len(input_buf.inputs) = = 1 and is_multi_outputs_template(input_buf.inputs[0])", - "docstring": "Check if input buffer is a output of multi-outputs template buffer", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\utils.py", - "ast_data": "FunctionDef name:is_output_of_multi_outputs_template arguments arg:input_buf type:Optional[Union[Buffer, Operation]] Return return:yes" - }, - { - "library": "pygame", - "name": "display_capture_filter_properties", - "source_code": "def display_capture_filter_properties(self): self.dev.displaycapturefilterproperties()", - "docstring": "Displays a dialog containing the property page of the capture filter. For VfW drivers you may find the option to select the resolution most likely here.", - "type": "method", - "file_path": "pygame\\src_py\\_camera_vidcapture.py", - "ast_data": "FunctionDef name:display_capture_filter_properties arguments arg:self" - }, - { - "library": "scipy", - "name": "proc_fpool_nog", - "source_code": "def proc_fpool_nog(self): for v in self.fpool: self.compute_sfield(v) self.fpool = set()", - "docstring": "Process all field functions with no constraints supplied.", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_vertex.py", - "ast_data": "FunctionDef name:proc_fpool_nog arguments arg:self For Assign Call call:set" - }, - { - "library": "pytorch", - "name": "stride", - "source_code": "def stride(self, node: IRNode, index: int, default_value: int = 0) -> str: if node is None: return str(default_value) index = _normalize_idx(index, len(node.get_size())) if index < 0: return str(default_value) stride = node.get_stride()[index] if V.graph.sizevars.statically_known_leq(stride, 1): return str(stride) return self.find_symbol(node, 'stride', dim = index) or str(stride)", - "docstring": "Hook called from template code to get the stride of an arg. Generates code which represents stride of a given node at index. If node is None, returns default_value. 
TODO: Will add needed args to pass it in if it is dynamic.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", - "ast_data": "FunctionDef name:stride arguments arg:self arg:node type:IRNode arg:index type:int arg:default_value type:int If Compare op:Is Return return:yes Assign Call call:_normalize_idx If Compare op:Lt Return return:yes Assign If Call call:statically_known_leq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "resolve_relation", - "source_code": "def resolve_relation(model, app_label = None, model_name = None): if isinstance(model, str): if model = = RECURSIVE_RELATIONSHIP_CONSTANT: if app_label is None or model_name is None: raise TypeError('app_label and model_name must be provided to resolve recursive relationships.') return (app_label, model_name) if '.' in model: app_label, model_name = model.split('.', 1) return (app_label, model_name.lower()) if app_label is None: raise TypeError('app_label must be provided to resolve unscoped model relationships.') return (app_label, model.lower()) return (model._meta.app_label, model._meta.model_name)", - "docstring": "Turn a model class or model reference string and return a model tuple. app_label and model_name are used to resolve the scope of recursive and unscoped model relationship.", - "type": "function", - "file_path": "django\\django\\db\\migrations\\utils.py", - "ast_data": "FunctionDef name:resolve_relation arguments arg:model arg:app_label arg:model_name If Call call:isinstance If Compare op:Eq If BoolOp Compare op:Is Compare op:Is Raise raises:TypeError('app_label and model_name must be provided to resolve recursive relationships.') Return return:yes If Compare op:In Assign Call call:split Return return:yes If Compare op:Is Raise raises:TypeError('app_label must be provided to resolve unscoped model relationships.') Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "to_discrete", - "source_code": "def to_discrete(self, dt, method = 'zoh', alpha = None): return TransferFunction(*cont2discrete((self.num, self.den), dt, method = method, alpha = alpha)[: -1], dt = dt)", - "docstring": "Returns the discretized system. Parameters: See for details. 
Returns ------- sys: instance of and", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:to_discrete arguments arg:self arg:dt arg:method arg:alpha Return return:yes" - }, - { - "library": "tensorflow", - "name": "resize_images_v2", - "source_code": "@dispatch.dispatch_for_api(image_ops.resize_images_v2) def resize_images_v2(images: ragged_tensor.RaggedTensor, size, method = image_ops.ResizeMethod.BILINEAR, preserve_aspect_ratio = False, antialias = False, name = None): with ops.name_scope(name, 'RaggedResizeImages', [images, size]): return _resize_images(image_ops.resize_images_v2, images, size, method = method, preserve_aspect_ratio = preserve_aspect_ratio, antialias = antialias)", - "docstring": "RaggedTensor dispatcher for tf.image.resize (tf-v2).", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_image_ops.py", - "ast_data": "FunctionDef name:resize_images_v2 arguments arg:images type:ragged_tensor.RaggedTensor arg:size arg:method arg:preserve_aspect_ratio arg:antialias arg:name Call call:dispatch_for_api With Return return:yes" - }, - { - "library": "matplotlib", - "name": "rotated", - "source_code": "def rotated(self, *, deg = None, rad = None): if deg is None and rad is None: raise ValueError('One of deg or rad is required') if deg is not None and rad is not None: raise ValueError('Only one of deg and rad can be supplied') new_marker = MarkerStyle(self) if new_marker._user_transform is None: new_marker._user_transform = Affine2D() if deg is not None: new_marker._user_transform.rotate_deg(deg) if rad is not None: new_marker._user_transform.rotate(rad) return new_marker", - "docstring": "Return a new version of this marker rotated by specified angle. Parameters ---------- deg : float, optional Rotation angle in degrees. rad : float, optional Rotation angle in radians. .. note:: You must specify exactly one of deg or rad.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\markers.py", - "ast_data": "FunctionDef name:rotated arguments arg:self If BoolOp Compare op:Is Compare op:Is Raise raises:ValueError('One of deg or rad is required') If BoolOp Compare op:IsNot Compare op:IsNot Raise raises:ValueError('Only one of deg and rad can be supplied') Assign Call call:MarkerStyle If Compare op:Is Assign Call call:Affine2D If Compare op:IsNot If Compare op:IsNot Return return:yes" - }, - { - "library": "django", - "name": "get_users", - "source_code": "def get_users(self, email): email_field_name = UserModel.get_email_field_name() active_users = UserModel._default_manager.filter(**{'%s__iexact' % email_field_name: email, 'is_active': True}) return (u for u in active_users if u.has_usable_password() and _unicode_ci_compare(email, getattr(u, email_field_name)))", - "docstring": "Given an email, return matching user(s) who should receive a reset. 
This allows subclasses to more easily customize the default policies that prevent inactive users and users with unusable passwords from resetting their password.", - "type": "method", - "file_path": "django\\django\\contrib\\auth\\forms.py", - "ast_data": "FunctionDef name:get_users arguments arg:self arg:email Assign Call call:get_email_field_name Assign Call call:filter Return return:yes" - }, - { - "library": "feincms", - "name": "Region", - "source_code": "class Region: def __init__(self, key, title, *args): self.key = key self.title = title self.inherited = args and args[0] = = 'inherited' or False self._content_types = [] def __str__(self): return force_str(self.title) @property def content_types(self): return [(ct.__name__.lower(), ct._meta.verbose_name) for ct in self._content_types]", - "docstring": "This class represents a region inside a template. Example regions might be 'main' and 'sidebar'.", - "type": "class", - "file_path": "feincms\\feincms\\models.py", - "ast_data": "ClassDef name:Region FunctionDef name:__init__ arguments arg:self arg:key arg:title vararg:args Assign Assign Assign BoolOp BoolOp Compare op:Eq Assign FunctionDef name:__str__ arguments arg:self Return return:yes FunctionDef name:content_types arguments arg:self Return return:yes" - }, - { - "library": "authlib", - "name": "validate_iat", - "source_code": "def validate_iat(self, now, leeway): if 'iat' in self: iat = self['iat'] if not _validate_numeric_time(iat): raise InvalidClaimError('iat') if iat > now + leeway: raise InvalidTokenError(description = 'The token is not valid as it was issued in the future')", - "docstring": "The \"iat\" (issued at) claim identifies the time at which the JWT was issued. This claim can be used to determine the age of the JWT. Implementers MAY provide for some small leeway, usually no more than a few minutes, to account for clock skew. Its value MUST be a number containing a NumericDate value. Use of this claim is OPTIONAL.", - "type": "method", - "file_path": "authlib\\authlib\\jose\\rfc7519\\claims.py", - "ast_data": "FunctionDef name:validate_iat arguments arg:self arg:now arg:leeway If Compare op:In Assign If Raise raises:InvalidClaimError('iat') If Compare op:Gt Raise raises:InvalidTokenError(description='The token is not valid as it was issued in the future')" - }, - { - "library": "tensorflow", - "name": "OpResolverType", - "source_code": "@_tf_export('lite.experimental.OpResolverType') @enum.unique class OpResolverType(enum.Enum): AUTO = 0 BUILTIN = 1 BUILTIN_REF = 2 BUILTIN_WITHOUT_DEFAULT_DELEGATES = 3", - "docstring": "Different types of op resolvers for Tensorflow Lite. * : Indicates the op resolver that is chosen by default in TfLite Python, which is the \"BUILTIN\" as described below. * : Indicates the op resolver for built-in ops with optimized kernel implementation. * : Indicates the op resolver for built-in ops with reference kernel implementation. It's generally used for testing and debugging. * : Indicates the op resolver for built-in ops with optimized kernel implementation, but it will disable the application of default TfLite delegates (like the XNNPACK delegate) to the model graph. 
Generally this should not be used unless there are issues with the default configuration.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\lite\\python\\interpreter.py", - "ast_data": "ClassDef name:OpResolverType Call call:_tf_export Assign Assign Assign Assign" - }, - { - "library": "matplotlib", - "name": "get_geometry", - "source_code": "def get_geometry(self): rows, cols = self.get_gridspec().get_geometry() return (rows, cols, self.num1, self.num2)", - "docstring": "Return the subplot geometry as tuple `GridSpec`).", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", - "ast_data": "FunctionDef name:get_geometry arguments arg:self Assign Call call:get_geometry Return return:yes" - }, - { - "library": "tensorflow", - "name": "new_list", - "source_code": "def new_list(iterable = None): if iterable: elements = tuple(iterable) else: elements = () if elements: return _py_list_new(elements) return tf_tensor_list_new(elements)", - "docstring": "The list constructor. Args: iterable: Optional elements to fill the list with. Returns: A list-like object. The exact return value depends on the initial elements.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py", - "ast_data": "FunctionDef name:new_list arguments arg:iterable If Assign Call call:tuple Assign If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "stop_on_exception", - "source_code": "def stop_on_exception(self): return self._coord.stop_on_exception()", - "docstring": "Context handler to stop the supervisor when an exception is raised. See . Returns: A context handler.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", - "ast_data": "FunctionDef name:stop_on_exception arguments arg:self Return return:yes" - }, - { - "library": "seaborn", - "name": "Area", - "source_code": "@document_properties @dataclass class Area(AreaBase, Mark): color: MappableColor = Mappable('C0') alpha: MappableFloat = Mappable(0.2) fill: MappableBool = Mappable(True) edgecolor: MappableColor = Mappable(depend = 'color') edgealpha: MappableFloat = Mappable(1) edgewidth: MappableFloat = Mappable(rc = 'patch.linewidth') edgestyle: MappableStyle = Mappable('-') baseline: MappableFloat = Mappable(0, grouping = False) def _standardize_coordinate_parameters(self, data, orient): dv = {'x': 'y', 'y': 'x'}[orient] return data.rename(columns = {'baseline': f'{dv}min', dv: f'{dv}max'}) def _postprocess_artist(self, artist, ax, orient): artist.set_linewidth(artist.get_linewidth() * 2) linestyle = artist.get_linestyle() if linestyle[1]: linestyle = (linestyle[0], tuple((x / 2 for x in linestyle[1]))) artist.set_linestyle(linestyle) artist.set_clip_path(artist.get_path(), artist.get_transform() + ax.transData) if self.artist_kws.get('clip_on', True): artist.set_clip_box(ax.bbox) val_idx = ['y', 'x'].index(orient) artist.sticky_edges[val_idx][:] = (0, np.inf)", - "docstring": "A fill mark drawn from a baseline to data values. See also -------- Band : A fill mark representing an interval between values. Examples -------- .. 
include:: ../docstrings/objects.Area.rst", - "type": "class", - "file_path": "seaborn\\seaborn\\_marks\\area.py", - "ast_data": "ClassDef name:Area FunctionDef name:_standardize_coordinate_parameters arguments arg:self arg:data arg:orient Assign Return return:yes FunctionDef name:_postprocess_artist arguments arg:self arg:artist arg:ax arg:orient Assign Call call:get_linestyle If Assign If Call call:get Assign Call call:index Assign" - }, - { - "library": "pytorch", - "name": "fold", - "source_code": "def fold(input: Tensor, output_size: BroadcastingList2[int], kernel_size: BroadcastingList2[int], dilation: BroadcastingList2[int] = 1, padding: BroadcastingList2[int] = 0, stride: BroadcastingList2[int] = 1) -> Tensor: if has_torch_function_unary(input): return handle_torch_function(fold, (input,), input, output_size, kernel_size, dilation = dilation, padding = padding, stride = stride) return torch._C._nn.col2im(input, _pair(output_size), _pair(kernel_size), _pair(dilation), _pair(padding), _pair(stride))", - "docstring": "Combine an array of sliding local blocks into a large containing tensor. .. warning:: Currently, only unbatched (3D) or batched (4D) image-like output tensors are supported. See :class: for details", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:fold arguments arg:input type:Tensor arg:output_size type:BroadcastingList2[int] arg:kernel_size type:BroadcastingList2[int] arg:dilation type:BroadcastingList2[int] arg:padding type:BroadcastingList2[int] arg:stride type:BroadcastingList2[int] If Call call:has_torch_function_unary Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "inverse_log_det_jacobian", - "source_code": "def inverse_log_det_jacobian(self, y, event_ndims, name = 'inverse_log_det_jacobian'): return self._call_inverse_log_det_jacobian(y, event_ndims, name)", - "docstring": "Returns the (log o det o Jacobian o inverse)(y). Mathematically, returns: . (Recall that: .) Note that is the negative of this function, evaluated at . Args: y: . The input to the \"inverse\" Jacobian determinant evaluation. event_ndims: Number of dimensions in the probabilistic events being transformed. Must be greater than or equal to . The result is summed over the final dimensions to produce a scalar Jacobian determinant for each event, i.e. it has shape dimensions. name: The name to give this op. Returns: , if this bijector is injective. If not injective, returns the tuple of local log det Jacobians, , where is the restriction of to the partition . Raises: TypeError: if is specified and is not . NotImplementedError: if is not implemented.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", - "ast_data": "FunctionDef name:inverse_log_det_jacobian arguments arg:self arg:y arg:event_ndims arg:name Return return:yes" - }, - { - "library": "scipy", - "name": "fourier_shift", - "source_code": "def fourier_shift(input, shift, n = -1, axis = -1, output = None): input = np.asarray(input) output = _get_output_fourier_complex(output, input) axis = normalize_axis_index(axis, input.ndim) shifts = _ni_support._normalize_sequence(shift, input.ndim) shifts = np.asarray(shifts, dtype = np.float64) if not shifts.flags.contiguous: shifts = shifts.copy() _nd_image.fourier_shift(input, shifts, n, axis, output) return output", - "docstring": "Multidimensional Fourier shift filter. The array is multiplied with the Fourier transform of a shift operation. 
Parameters ---------- input : array_like The input array. shift : float or sequence The size of the box used for filtering. If a float, is the same for all axes. If a sequence, has to contain one value for each axis. n : int, optional If is negative (default), then the input is assumed to be the result of a complex fft. If is larger than or equal to zero, the input is assumed to be the result of a real fft, and gives the length of the array before transformation along the real transform direction. axis : int, optional The axis of the real transform. output : ndarray, optional If given, the result of shifting the input is placed in this array. Returns ------- fourier_shift : ndarray The shifted input. Examples -------- >>> from scipy import ndimage, datasets >>> import matplotlib.pyplot as plt >>> import numpy.fft >>> fig, (ax1, ax2) = plt.subplots(1, 2) >>> plt.gray() # show the filtered result in grayscale >>> ascent = datasets.ascent() >>> input_ = numpy.fft.fft2(ascent) >>> result = ndimage.fourier_shift(input_, shift=200) >>> result = numpy.fft.ifft2(result) >>> ax1.imshow(ascent) >>> ax2.imshow(result.real) # the imaginary part is an artifact >>> plt.show()", - "type": "function", - "file_path": "scipy\\scipy\\ndimage\\_fourier.py", - "ast_data": "FunctionDef name:fourier_shift arguments arg:input arg:shift arg:n arg:axis arg:output Assign Call call:asarray Assign Call call:_get_output_fourier_complex Assign Call call:normalize_axis_index Assign Call call:_normalize_sequence Assign Call call:asarray If Assign Call call:copy Return return:yes" - }, - { - "library": "scipy", - "name": "parse", - "source_code": "def parse(version): try: return Version(version) except InvalidVersion: return LegacyVersion(version)", - "docstring": "Parse the given version string and return either a :class: object or a :class: object depending on if the given version is a valid PEP 440 version or a legacy version.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\_pep440.py", - "ast_data": "FunctionDef name:parse arguments arg:version Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "authlib", - "name": "authenticate_user", - "source_code": "def authenticate_user(self, authorization_code): raise NotImplementedError()", - "docstring": "Authenticate the user related to this authorization_code. Developers MUST implement this method in subclass, e.g.:: def authenticate_user(self, authorization_code): return User.get(authorization_code.user_id) :param authorization_code: AuthorizationCode object :return: user", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\authorization_code.py", - "ast_data": "FunctionDef name:authenticate_user arguments arg:self arg:authorization_code Raise raises:NotImplementedError()" - }, - { - "library": "kornia", - "name": "StraightThroughEstimator", - "source_code": "class StraightThroughEstimator(nn.Module): def __init__(self, target_fn: nn.Module, grad_fn: Optional[Callable[..., Any]] = None) -> None: super().__init__() self.target_fn = target_fn self.grad_fn = grad_fn def __repr__(self) -> str: return f'{self.__class__.__name__}(target_fn = {self.target_fn}, grad_fn = {self.grad_fn})' def forward(self, input: Tensor) -> Tensor: out = self.target_fn(input) if not isinstance(out, Tensor): raise NotImplementedError('Only Tensor is supported at the moment. 
Feel free to contribute to https: //github.com/kornia/kornia.') output = STEFunction.apply(input, out, self.grad_fn) return output", - "docstring": "Straight-Through Estimation (STE) module. STE wraps the `` block. >>> import kornia.augmentation as K >>> input = torch.randn(1, 1, 4, 4, requires_grad = True) >>> aug = K.ImageSequential( ... K.RandomAffine((77, 77)), ... StraightThroughEstimator(K.RandomPosterize(3, p=1.), grad_fn=None), ... K.RandomRotation((15, 15)), ... ) >>> aug(input).mean().backward() >>> input.grad tensor([[[[0.0422, 0.0626, 0.0566, 0.0422], [0.0566, 0.0626, 0.0626, 0.0626], [0.0626, 0.0626, 0.0626, 0.0566], [0.0422, 0.0566, 0.0626, 0.0422]]]])", - "type": "class", - "file_path": "kornia\\kornia\\grad_estimator\\ste.py", - "ast_data": "ClassDef name:StraightThroughEstimator FunctionDef name:__init__ arguments arg:self arg:target_fn type:nn.Module arg:grad_fn type:Optional[Callable[..., Any]] Assign Assign FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:input type:Tensor Assign Call call:target_fn If Raise raises:NotImplementedError('Only Tensor is supported at the moment. Feel free to contribute to https://github.com/kornia/kornia.') Assign Call call:apply Return return:yes" - }, - { - "library": "scikit-learn", - "name": "strip_newsgroup_quoting", - "source_code": "def strip_newsgroup_quoting(text): good_lines = [line for line in text.split('\\n') if not _QUOTE_RE.search(line)] return '\\n'.join(good_lines)", - "docstring": "Given text in \"news\" format, strip lines beginning with the quote characters > or |, plus lines that often introduce a quoted section (for example, because they contain the string 'writes:'.) Parameters ---------- text : str The text from which to remove the signature block.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\datasets\\_twenty_newsgroups.py", - "ast_data": "FunctionDef name:strip_newsgroup_quoting arguments arg:text Assign Return return:yes" - }, - { - "library": "pandas", - "name": "mode", - "source_code": "def mode(self, dropna: bool = True) -> Series: values = self._values if isinstance(values, np.ndarray): res_values, _ = algorithms.mode(values, dropna = dropna) else: res_values = values._mode(dropna = dropna) return self._constructor(res_values, index = range(len(res_values)), name = self.name, copy = False, dtype = self.dtype).__finalize__(self, method = 'mode')", - "docstring": "Return the mode(s) of the Series. The mode is the value that appears most often. There can be multiple modes. Always returns Series even if only one value is returned. Parameters ---------- dropna : bool, default True Don't consider counts of NaN/NaT. Returns ------- Series Modes of the Series in sorted order. See Also -------- numpy.mode : Equivalent numpy function for computing median. Series.sum : Sum of the values. Series.median : Median of the values. Series.std : Standard deviation of the values. Series.var : Variance of the values. Series.min : Minimum value. Series.max : Maximum value. 
Examples -------- >>> s = pd.Series([2, 4, 2, 2, 4, None]) >>> s.mode() 0 2.0 dtype: float64 More than one mode: >>> s = pd.Series([2, 4, 8, 2, 4, None]) >>> s.mode() 0 2.0 1 4.0 dtype: float64 With and without considering null value: >>> s = pd.Series([2, 4, None, None, 4, None]) >>> s.mode(dropna=False) 0 NaN dtype: float64 >>> s = pd.Series([2, 4, None, None, 4, None]) >>> s.mode() 0 4.0 dtype: float64", "type": "method", "file_path": "pandas\\pandas\\core\\series.py", "ast_data": "FunctionDef name:mode arguments arg:self arg:dropna type:bool Assign If Call call:isinstance Assign Call call:mode Assign Call call:_mode Return return:yes" }, { "library": "scikit-learn", "name": "score_samples", "source_code": "def score_samples(self, X): check_is_fitted(self) X = validate_data(self, X, reset = False) Xr = X - self.mean_ precision = self.get_precision() n_features = X.shape[1] log_like = -0.5 * (Xr * np.dot(Xr, precision)).sum(axis = 1) log_like -= 0.5 * (n_features * log(2.0 * np.pi) - fast_logdet(precision)) return log_like", "docstring": "Compute the log-likelihood of each sample. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data. Returns ------- ll : ndarray of shape (n_samples,) Log-likelihood of each sample under the current model.", "type": "method", "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py", "ast_data": "FunctionDef name:score_samples arguments arg:self arg:X Assign Call call:validate_data Assign Assign Call call:get_precision Assign Assign Return return:yes" }, { "library": "pytorch", "name": "ConvBnReLU1d", "source_code": "class ConvBnReLU1d(_FusedModule): def __init__(self, conv, bn, relu): assert type_before_parametrizations(conv) == Conv1d and type_before_parametrizations(bn) == BatchNorm1d and (type_before_parametrizations(relu) == ReLU), f'Incorrect types for input modules{type_before_parametrizations(conv)}{type_before_parametrizations(bn)}{type_before_parametrizations(relu)}' super().__init__(conv, bn, relu)", "docstring": "This is a sequential container which calls the Conv 1d, Batch Norm 1d, and ReLU modules.
During quantization this will be replaced with the corresponding fused module.", - "type": "class", - "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py", - "ast_data": "ClassDef name:ConvBnReLU1d FunctionDef name:__init__ arguments arg:self arg:conv arg:bn arg:relu" - }, - { - "library": "tensorflow", - "name": "is_recording_summaries", - "source_code": "def is_recording_summaries(): if _summary_state.writer is None: return False if _summary_state.is_recording is None: return False return _summary_state.is_recording", - "docstring": "Returns non-Tensor boolean indicating if summaries are being recorded.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", - "ast_data": "FunctionDef name:is_recording_summaries arguments If Compare op:Is Return return:yes If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "AsinhScale", - "source_code": "class AsinhScale(ScaleBase): name = 'asinh' auto_tick_multipliers = {3: (2,), 4: (2,), 5: (2,), 8: (2, 4), 10: (2, 5), 16: (2, 4, 8), 64: (4, 16), 1024: (256, 512)} def __init__(self, axis, *, linear_width = 1.0, base = 10, subs = 'auto', **kwargs): super().__init__(axis) self._transform = AsinhTransform(linear_width) self._base = int(base) if subs = = 'auto': self._subs = self.auto_tick_multipliers.get(self._base) else: self._subs = subs linear_width = property(lambda self: self._transform.linear_width) def get_transform(self): return self._transform def set_default_locators_and_formatters(self, axis): axis.set(major_locator = AsinhLocator(self.linear_width, base = self._base), minor_locator = AsinhLocator(self.linear_width, base = self._base, subs = self._subs), minor_formatter = NullFormatter()) if self._base > 1: axis.set_major_formatter(LogFormatterSciNotation(self._base)) else: axis.set_major_formatter('{x: .3g}')", - "docstring": "A quasi-logarithmic scale based on the inverse hyperbolic sine (asinh) For values close to zero, this is essentially a linear scale, but for large magnitude values (either positive or negative) it is asymptotically logarithmic. The transition between these linear and logarithmic regimes is smooth, and has no discontinuities in the function gradient in contrast to the (\"symlog\") scale. Specifically, the transformation of an axis coordinate :math: is :math: where :math: is the effective width of the linear region of the transformation. In that region, the transformation is :math:. For large values of :math: the transformation behaves as :math:. .. note:: This API is provisional and may be revised in the future based on early user feedback.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\scale.py", - "ast_data": "ClassDef name:AsinhScale Assign Assign FunctionDef name:__init__ arguments arg:self arg:axis kwarg:kwargs Assign Call call:AsinhTransform Assign Call call:int If Compare op:Eq Assign Call call:get Assign Assign Call call:property FunctionDef name:get_transform arguments arg:self Return return:yes FunctionDef name:set_default_locators_and_formatters arguments arg:self arg:axis If Compare op:Gt" - }, - { - "library": "pytorch", - "name": "stream", - "source_code": "def stream(stream: Optional['torch.xpu.Stream']) -> StreamContext: return StreamContext(stream)", - "docstring": "Wrap around the Context-manager StreamContext that selects a given stream. Arguments: stream (Stream): selected stream. 
This manager is a no-op if it's ``.", - "type": "function", - "file_path": "pytorch\\torch\\xpu\\__init__.py", - "ast_data": "FunctionDef name:stream arguments arg:stream type:Optional['torch.xpu.Stream'] Return return:yes" - }, - { - "library": "django", - "name": "prepare_lookup_value", - "source_code": "def prepare_lookup_value(key, value, separator = ', '): if isinstance(value, list): return [prepare_lookup_value(key, v, separator = separator) for v in value] if key.endswith('__in'): value = value.split(separator) elif key.endswith('__isnull'): value = value.lower() not in ('', 'false', '0') return value", - "docstring": "Return a lookup value prepared to be used in queryset filtering.", - "type": "function", - "file_path": "django\\django\\contrib\\admin\\utils.py", - "ast_data": "FunctionDef name:prepare_lookup_value arguments arg:key arg:value arg:separator If Call call:isinstance Return return:yes If Call call:endswith Assign Call call:split If Call call:endswith Assign Compare op:NotIn Return return:yes" - }, - { - "library": "tensorflow", - "name": "stop_recording", - "source_code": "@contextlib.contextmanager def stop_recording(): is_stopped = pywrap_tfe.TFE_Py_TapeSetIsStopped() try: if not is_stopped: pywrap_tfe.TFE_Py_TapeSetStopOnThread() yield finally: if not is_stopped: pywrap_tfe.TFE_Py_TapeSetRestartOnThread()", - "docstring": "Stop all gradient recording (backprop and forwardprop).", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py", - "ast_data": "FunctionDef name:stop_recording arguments Assign Call call:TFE_Py_TapeSetIsStopped Try If If" - }, - { - "library": "scikit-learn", - "name": "set_output", - "source_code": "def set_output(self, *, transform = None): super().set_output(transform = transform) transformers = (trans for _, trans, _ in chain(self.transformers, getattr(self, 'transformers_', [])) if trans not in {'passthrough', 'drop'}) for trans in transformers: _safe_set_output(trans, transform = transform) if self.remainder not in {'passthrough', 'drop'}: _safe_set_output(self.remainder, transform = transform) return self", - "docstring": "Set the output container when and are called. Calling will set the output of all estimators in and . Parameters ---------- transform : {\"default\", \"pandas\", \"polars\"}, default=None Configure output of and . - : Default output format of a transformer - : DataFrame output - : Polars output - : Transform configuration is unchanged .. versionadded:: 1.4 option was added. Returns ------- self : estimator instance Estimator instance.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py", - "ast_data": "FunctionDef name:set_output arguments arg:self Assign For If Compare op:NotIn Return return:yes" - }, - { - "library": "pytorch", - "name": "consume_prefix_in_state_dict_if_present", - "source_code": "def consume_prefix_in_state_dict_if_present(state_dict: dict[str, Any], prefix: str) -> None: keys = list(state_dict.keys()) for key in keys: if key.startswith(prefix): newkey = key[len(prefix):] state_dict[newkey] = state_dict.pop(key) if hasattr(state_dict, '_metadata'): keys = list(state_dict._metadata.keys()) for key in keys: if len(key) = = 0: continue if key = = prefix.replace('.', '') or key.startswith(prefix): newkey = key[len(prefix):] state_dict._metadata[newkey] = state_dict._metadata.pop(key)", - "docstring": "Strip the prefix in state_dict in place, if any. .. 
note:: Given a from a DP/DDP model, a local model can load it by applying before calling :meth:. Args: state_dict (OrderedDict): a state-dict to be loaded to the model. prefix (str): prefix.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\modules\\utils.py", - "ast_data": "FunctionDef name:consume_prefix_in_state_dict_if_present arguments arg:state_dict type:dict[str, Any] arg:prefix type:str Assign Call call:list For If Call call:startswith Assign Assign Call call:pop If Call call:hasattr Assign Call call:list For If Compare op:Eq If BoolOp Compare op:Eq Call call:startswith Assign Assign Call call:pop" - }, - { - "library": "django", - "name": "copy_exception", - "source_code": "def copy_exception(exc, backend = None): backend = backend or exc.backend new = exc.__class__(*exc.args, tried = exc.tried, backend = backend, chain = exc.chain) if hasattr(exc, 'template_debug'): new.template_debug = exc.template_debug return new", - "docstring": "Create a new TemplateDoesNotExist. Preserve its declared attributes and template debug data but discard __traceback__, __context__, and __cause__ to make this object suitable for keeping around (in a cache, for example).", - "type": "function", - "file_path": "django\\django\\template\\backends\\django.py", - "ast_data": "FunctionDef name:copy_exception arguments arg:exc arg:backend Assign BoolOp Assign Call call:__class__ If Call call:hasattr Assign Return return:yes" - }, - { - "library": "scrapy", - "name": "stop", - "source_code": "async def stop(self) -> None: await deferred_to_future(self._stop())", - "docstring": "Stops simultaneously all the crawling jobs taking place. Completes when they all have ended.", - "type": "method", - "file_path": "scrapy\\scrapy\\crawler.py", - "ast_data": "AsyncFunctionDef name:stop arguments arg:self" - }, - { - "library": "pytorch", - "name": "InflatableArg", - "source_code": "class InflatableArg(NamedTuple): value: Any fmt: str = '{}' fmt_fn: str = ''", - "docstring": "Helper type for bundled inputs. 'value' is the compressed/deflated input that is stored in the model. Value must be of the same type as the argument to the function that it is a deflated input for. 'fmt' is a formatable code string that is executed to inflate the compressed data into the appropriate input. It can use 'value' as an input to the format str. It must result in a value of the same type as 'value'. 'fmt_fn' is a formatable function code string that is executed to inflate the compressed data into the appropriate input. It must result in a value of the same type as 'value'. The function name should be the formatable part of the string. Note: Only top level InflatableArgs can be inflated. i.e. you cannot place an inflatable arg inside of some other structure. 
You should instead create an inflatable arg such that the fmt code string returns the full structure of your input.", - "type": "class", - "file_path": "pytorch\\torch\\utils\\bundled_inputs.py", - "ast_data": "ClassDef name:InflatableArg" - }, - { - "library": "django", - "name": "get_table_list", - "source_code": "def get_table_list(self, cursor): cursor.execute(\"\\n SELECT\\n c.relname, \\n CASE\\n WHEN c.relispartition THEN 'p'\\n WHEN c.relkind IN ('m', 'v') THEN 'v'\\n ELSE 't'\\n END, \\n obj_description(c.oid, 'pg_class')\\n FROM pg_catalog.pg_class c\\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\\n WHERE c.relkind IN ('f', 'm', 'p', 'r', 'v')\\n AND n.nspname NOT IN ('pg_catalog', 'pg_toast')\\n AND pg_catalog.pg_table_is_visible(c.oid)\\n \") return [TableInfo(*row) for row in cursor.fetchall() if row[0] not in self.ignored_tables]", - "docstring": "Return a list of table and view names in the current database.", - "type": "method", - "file_path": "django\\django\\db\\backends\\postgresql\\introspection.py", - "ast_data": "FunctionDef name:get_table_list arguments arg:self arg:cursor Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_rel_timestamps", - "source_code": "def get_rel_timestamps(self, node_name, output_slot, debug_op, device_name = None): device_name = self._infer_device_name(device_name, node_name) watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op) if watch_key not in self._watch_key_to_datum[device_name]: raise WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump' % watch_key) return self._watch_key_to_rel_time[device_name][watch_key]", - "docstring": "Get the relative timestamp from for a debug-dumped tensor. Relative timestamp means (absolute timestamp - ), where is the absolute timestamp of the first dumped tensor in the dump root. The tensor may be dumped multiple times in the dump root directory, so a list of relative timestamps () is returned. Args: node_name: () name of the node that the tensor is produced by. output_slot: () output slot index of tensor. debug_op: () name of the debug op. device_name: () name of the device. If there is only one device or if the specified debug_watch_key exists on only one device, this argument is optional. Returns: ( of ) list of relative timestamps. 
Raises: WatchKeyDoesNotExistInDebugDumpDirError: If the tensor watch key does not exist in the debug dump data.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", - "ast_data": "FunctionDef name:get_rel_timestamps arguments arg:self arg:node_name arg:output_slot arg:debug_op arg:device_name Assign Call call:_infer_device_name Assign Call call:_get_tensor_watch_key If Compare op:NotIn Raise raises:WatchKeyDoesNotExistInDebugDumpDirError('Watch key \"%s\" does not exist in the debug dump' % watch_key) Return return:yes" - }, - { - "library": "cherrypy", - "name": "kwargs", - "source_code": "@kwargs.setter def kwargs(self, kwargs): cherrypy.serving.request.kwargs = kwargs self._kwargs = kwargs", - "docstring": "Set the named request keyword arguments as :class:.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpdispatch.py", - "ast_data": "FunctionDef name:kwargs arguments arg:self arg:kwargs Assign Assign" - }, - { - "library": "numpy", - "name": "in1d", - "source_code": "def in1d(ar1, ar2, assume_unique = False, invert = False): if not assume_unique: ar1, rev_idx = unique(ar1, return_inverse = True) ar2 = unique(ar2) ar = ma.concatenate((ar1, ar2)) order = ar.argsort(kind = 'mergesort') sar = ar[order] if invert: bool_ar = sar[1:] ! = sar[: -1] else: bool_ar = sar[1:] = = sar[: -1] flag = ma.concatenate((bool_ar, [invert])) indx = order.argsort(kind = 'mergesort')[: len(ar1)] if assume_unique: return flag[indx] else: return flag[indx][rev_idx]", - "docstring": "Test whether each element of an array is also present in a second array. The output is always a masked array. See for more details. We recommend using :func: instead of for new code. See Also -------- isin : Version of this function that preserves the shape of ar1. numpy.in1d : Equivalent function for ndarrays. Examples -------- >>> import numpy as np >>> ar1 = np.ma.array([0, 1, 2, 5, 0]) >>> ar2 = [0, 2] >>> np.ma.in1d(ar1, ar2) masked_array(data=[ True, False, True, False, True], mask=False, fill_value=True)", - "type": "function", - "file_path": "numpy\\numpy\\ma\\extras.py", - "ast_data": "FunctionDef name:in1d arguments arg:ar1 arg:ar2 arg:assume_unique arg:invert If Assign Call call:unique Assign Call call:unique Assign Call call:concatenate Assign Call call:argsort Assign If Assign Compare op:NotEq Assign Compare op:Eq Assign Call call:concatenate Assign If Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "get_source_fields", - "source_code": "def get_source_fields(self): return [e._output_field_or_none for e in self.get_source_expressions()]", - "docstring": "Return the underlying field types used by this aggregate.", - "type": "method", - "file_path": "django\\django\\db\\models\\expressions.py", - "ast_data": "FunctionDef name:get_source_fields arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "__call__", - "source_code": "def __call__(self, x0, y0, width, height, mutation_size): pad = mutation_size * self.pad width = width + 2 * pad height = height + 2 * pad x0, y0 = (x0 - pad, y0 - pad) x1, y1 = (x0 + width, y0 + height) return Path([(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0 - pad, (y0 + y1) / 2), (x0, y0), (x0, y0)], closed = True)", - "docstring": "Given the location and size of the box, return the path of the box around it. Rotation is automatically taken care of. Parameters ---------- x0, y0, width, height : float Box location and size. 
mutation_size : float Reference scale for the mutation, typically the text font size.", - "type": "method", - "file_path": "matplotlib\\galleries\\users_explain\\text\\annotations.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:x0 arg:y0 arg:width arg:height arg:mutation_size Assign Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "TFLiteSavedModelConverter", - "source_code": "class TFLiteSavedModelConverter(TFLiteConverterBaseV1): def __init__(self, saved_model_dir, saved_model_tags, saved_model_exported_names, experimental_debug_info_func = None): super(TFLiteSavedModelConverter, self).__init__(experimental_debug_info_func) self.saved_model_dir = saved_model_dir self._saved_model_tags = saved_model_tags self._saved_model_exported_names = saved_model_exported_names if len(self._saved_model_exported_names) ! = 1: raise ValueError('Only supports a single signature key.') signature_key = self._saved_model_exported_names[0] result = _freeze_saved_model(self.saved_model_dir, None, None, None, self._saved_model_tags, signature_key) self._graph_def = result[0] self._input_tensors = result[1] self._output_tensors = result[2] self._parse_saved_model_args() @_export_metrics def convert(self): return super(TFLiteSavedModelConverter, self).convert()", - "docstring": "Converts the given SavedModel into TensorFlow Lite model. Attributes: saved_model_dir: Directory of the SavedModel.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", - "ast_data": "ClassDef name:TFLiteSavedModelConverter FunctionDef name:__init__ arguments arg:self arg:saved_model_dir arg:saved_model_tags arg:saved_model_exported_names arg:experimental_debug_info_func Assign Assign Assign If Compare op:NotEq Raise raises:ValueError('Only supports a single signature key.') Assign Assign Call call:_freeze_saved_model Assign Assign Assign FunctionDef name:convert arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "vsplit", - "source_code": "@array_function_dispatch(_hvdsplit_dispatcher) def vsplit(ary, indices_or_sections): if _nx.ndim(ary) < 2: raise ValueError('vsplit only works on arrays of 2 or more dimensions') return split(ary, indices_or_sections, 0)", - "docstring": "Split an array into multiple sub-arrays vertically (row-wise). Please refer to the `axis=0` (default), the array is always split along the first axis regardless of the array dimension. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> import numpy as np >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) >>> np.vsplit(x, 2) [array([[0., 1., 2., 3.], [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.], [12., 13., 14., 15.]])] >>> np.vsplit(x, np.array([3, 6])) [array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)] With a higher dimensional array the split is still along the first axis. 
>>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]) >>> np.vsplit(x, 2) [array([[[0., 1.], [2., 3.]]]), array([[[4., 5.], [6., 7.]]])]", "type": "function", "file_path": "numpy\\numpy\\lib\\_shape_base_impl.py", "ast_data": "FunctionDef name:vsplit arguments arg:ary arg:indices_or_sections Call call:array_function_dispatch If Compare op:Lt Raise raises:ValueError('vsplit only works on arrays of 2 or more dimensions') Return return:yes" }, { "library": "pytorch", "name": "clear_safe_globals", "source_code": "def clear_safe_globals() -> None: _weights_only_unpickler._clear_safe_globals()", "docstring": "Clears the list of globals that are safe for `` load.", "type": "function", "file_path": "pytorch\\torch\\serialization.py", "ast_data": "FunctionDef name:clear_safe_globals arguments" }, { "library": "scipy", "name": "rmatvec", "source_code": "def rmatvec(self, x): x = np.asanyarray(x) M, N = self.shape if x.shape != (M,) and x.shape != (M, 1): raise ValueError('dimension mismatch') y = self._rmatvec(x) if isinstance(x, np.matrix): y = asmatrix(y) else: y = np.asarray(y) if x.ndim == 1: y = y.reshape(N) elif x.ndim == 2: y = y.reshape(N, 1) else: raise ValueError('invalid shape returned by user-defined rmatvec()') return y", "docstring": "Adjoint matrix-vector multiplication. Performs the operation y = A^H @ x where A is an MxN linear operator and x is a column vector or 1-d array. Parameters ---------- x : {matrix, ndarray} An array with shape (M,) or (M,1). Returns ------- y : {matrix, ndarray} A matrix or ndarray with shape (N,) or (N,1) depending on the type and shape of the x argument. Notes ----- This rmatvec wraps the user-specified rmatvec routine or overridden _rmatvec method to ensure that y has the correct shape and type.", "type": "method", "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py", "ast_data": "FunctionDef name:rmatvec arguments arg:self arg:x Assign Call call:asanyarray Assign If BoolOp Compare op:NotEq Compare op:NotEq Raise raises:ValueError('dimension mismatch') Assign Call call:_rmatvec If Call call:isinstance Assign Call call:asmatrix Assign Call call:asarray If Compare op:Eq Assign Call call:reshape If Compare op:Eq Assign Call call:reshape Raise raises:ValueError('invalid shape returned by user-defined rmatvec()') Return return:yes" }, { "library": "pytorch", "name": "save_for_forward", "source_code": "def save_for_forward(self, *tensors: torch.Tensor): for tensor in tensors: assert isinstance(tensor, torch.Tensor) or tensor is None, 'save_for_forward expects all arguments to be tensors; you should save non-tensors as attributes on ctx.' self.saved_for_forward = tensors", "docstring": "Save given tensors for a future call to :func:. `setup_contextforwardjvpsaved_tensorsextending-autograd` for more details on how to use this method.
Example:: >>> # xdoctest: +SKIP >>> class Func(torch.autograd.Function): >>> @staticmethod >>> def forward(ctx, x: torch.Tensor, y: torch.Tensor, z: int): >>> ctx.save_for_backward(x, y) >>> ctx.save_for_forward(x, y) >>> ctx.z = z >>> return x * y * z >>> >>> @staticmethod >>> def jvp(ctx, x_t, y_t, _): >>> x, y = ctx.saved_tensors >>> z = ctx.z >>> return z * (y * x_t + x * y_t) >>> >>> @staticmethod >>> def vjp(ctx, grad_out): >>> x, y = ctx.saved_tensors >>> z = ctx.z >>> return z * grad_out * y, z * grad_out * x, None >>> >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double) >>> t = torch.tensor(1., dtype=torch.double) >>> b = torch.tensor(2., requires_grad=True, dtype=torch.double) >>> c = 4 >>> >>> with fwAD.dual_level(): >>> a_dual = fwAD.make_dual(a, t) >>> d = Func.apply(a_dual, b, c)", - "type": "method", - "file_path": "pytorch\\torch\\autograd\\function.py", - "ast_data": "FunctionDef name:save_for_forward arguments arg:self vararg:tensors For Assign" - }, - { - "library": "pytorch", - "name": "set_reference_quantized_module", - "source_code": "def set_reference_quantized_module(self, reference_quantized_module: type[torch.nn.Module]) -> BackendPatternConfig: self.reference_quantized_module = reference_quantized_module return self", - "docstring": "Set the module that represents the reference quantized implementation for this pattern's root module. For more detail, see :func:.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", - "ast_data": "FunctionDef name:set_reference_quantized_module arguments arg:self arg:reference_quantized_module type:type[torch.nn.Module] Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "aliased_name", - "source_code": "def aliased_name(self, s): aliases = ''.join((' or %s' % x for x in sorted(self.aliasd.get(s, [])))) return s + aliases", - "docstring": "Return 'PROPNAME or alias' if *s* has an alias, else return 'PROPNAME'. For example, for the line markerfacecolor property, which has an alias, return 'markerfacecolor or mfc' and for the transform property, which does not, return 'transform'.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\artist.py", - "ast_data": "FunctionDef name:aliased_name arguments arg:self arg:s Assign Call call:join Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_params", - "source_code": "def get_params(self, deep = True): return super()._get_params('estimators', deep = deep)", - "docstring": "Get the parameters of an estimator from the ensemble. Returns the parameters given in the constructor as well as the estimators contained within the parameter. Parameters ---------- deep : bool, default=True Setting it to True gets the various estimators and the parameters of the estimators as well. 
Returns ------- params : dict Parameter and estimator names mapped to their values or parameter names mapped to their values.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_base.py", - "ast_data": "FunctionDef name:get_params arguments arg:self arg:deep Return return:yes" - }, - { - "library": "django", - "name": "management_form", - "source_code": "@cached_property def management_form(self): if self.is_bound: form = ManagementForm(self.data, auto_id = self.auto_id, prefix = self.prefix, renderer = self.renderer) form.full_clean() else: form = ManagementForm(auto_id = self.auto_id, prefix = self.prefix, initial = {TOTAL_FORM_COUNT: self.total_form_count(), INITIAL_FORM_COUNT: self.initial_form_count(), MIN_NUM_FORM_COUNT: self.min_num, MAX_NUM_FORM_COUNT: self.max_num}, renderer = self.renderer) return form", - "docstring": "Return the ManagementForm instance for this FormSet.", - "type": "method", - "file_path": "django\\django\\forms\\formsets.py", - "ast_data": "FunctionDef name:management_form arguments arg:self If Assign Call call:ManagementForm Assign Call call:ManagementForm Return return:yes" - }, - { - "library": "pytorch", - "name": "iters_per_second", - "source_code": "@property def iters_per_second(self): return self.num_iters / self.total_time_seconds", - "docstring": "Return total number of iterations per second across all calling threads.", - "type": "method", - "file_path": "pytorch\\torch\\utils\\throughput_benchmark.py", - "ast_data": "FunctionDef name:iters_per_second arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "get_device_name", - "source_code": "def get_device_name(device: Optional[_device_t] = None) -> str: return get_device_properties(device).name", - "docstring": "Get the name of a device. Args: device (torch.device or int or str, optional): device for which to return the name. This function is a no-op if this argument is a negative integer. It uses the current device, given by :func:, if :attr: is `` (default). Returns: str: the name of the device", - "type": "function", - "file_path": "pytorch\\torch\\xpu\\__init__.py", - "ast_data": "FunctionDef name:get_device_name arguments arg:device type:Optional[_device_t] Return return:yes" - }, - { - "library": "matplotlib", - "name": "window_none", - "source_code": "def window_none(x): return x", - "docstring": "No window function; simply return *x*. See Also -------- window_hanning : Another window algorithm.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\mlab.py", - "ast_data": "FunctionDef name:window_none arguments arg:x Return return:yes" - }, - { - "library": "flexx", - "name": "HSplit", - "source_code": "class HSplit(HVLayout): _DEFAULT_ORIENTATION = 'h' _DEFAULT_MODE = 'split'", - "docstring": "Horizontal layout that initially distributes the available space corresponding to the widget's flex values, and has draggable splitters. By default, this layout has a slightly larger spacing between the widgets. (I.e. 
an HVLayout with orientation 'h' and mode 'split'.)", - "type": "class", - "file_path": "flexx\\flexx\\ui\\layouts\\_hv.py", - "ast_data": "ClassDef name:HSplit Assign Assign" - }, - { - "library": "pandas", - "name": "fast_xs", - "source_code": "def fast_xs(self, loc): raise NotImplementedError('Use series._values[loc] instead')", - "docstring": "fast path for getting a cross-section return a view of the data", - "type": "method", - "file_path": "pandas\\pandas\\core\\internals\\managers.py", - "ast_data": "FunctionDef name:fast_xs arguments arg:self arg:loc Raise raises:NotImplementedError('Use series._values[loc] instead')" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, nrows, ncols, subplot_spec, wspace = None, hspace = None, height_ratios = None, width_ratios = None): self._wspace = wspace self._hspace = hspace if isinstance(subplot_spec, SubplotSpec): self._subplot_spec = subplot_spec else: raise TypeError('subplot_spec must be type SubplotSpec, usually from GridSpec, or axes.get_subplotspec.') self.figure = self._subplot_spec.get_gridspec().figure super().__init__(nrows, ncols, width_ratios = width_ratios, height_ratios = height_ratios)", - "docstring": "Parameters ---------- nrows, ncols : int Number of rows and number of columns of the grid. subplot_spec : SubplotSpec Spec from which the layout parameters are inherited. wspace, hspace : float, optional See for more details. If not specified default values (from the figure or rcParams) are used. height_ratios : array-like of length *nrows*, optional See for details. width_ratios : array-like of length *ncols*, optional See for details.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:nrows arg:ncols arg:subplot_spec arg:wspace arg:hspace arg:height_ratios arg:width_ratios Assign Assign If Call call:isinstance Assign Raise raises:TypeError('subplot_spec must be type SubplotSpec, usually from GridSpec, or axes.get_subplotspec.') Assign" - }, - { - "library": "scipy", - "name": "residues", - "source_code": "def residues(self): if self._residues is None: with np.errstate(divide = 'ignore', invalid = 'ignore'): N = 1 / np.subtract.outer(self.poles(), self._support_points) @ (self._support_values * self.weights) Ddiff = -(1 / np.subtract.outer(self.poles(), self._support_points)) ** 2 @ self.weights self._residues = N / Ddiff return self._residues", - "docstring": "Compute the residues of the poles of the approximation. Returns ------- residues : array Residues associated with the of the approximation", - "type": "method", - "file_path": "scipy\\scipy\\interpolate\\_bary_rational.py", - "ast_data": "FunctionDef name:residues arguments arg:self If Compare op:Is With Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "env", - "source_code": "@tf_export('__internal__.distribute.combinations.env', v1 = []) def env(): return _env", - "docstring": "Returns the object holds the test environment information. Tests should modify this in the main process if needed, and it will be passed to the worker processes each time a test case is run. 
Returns: a TestEnvironment object.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", - "ast_data": "FunctionDef name:env arguments Call call:tf_export Return return:yes" - }, - { - "library": "tensorflow", - "name": "save_summaries_secs", - "source_code": "@property def save_summaries_secs(self): return self._save_summaries_secs", - "docstring": "Return the delay between summary computations. Returns: A timestamp.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", - "ast_data": "FunctionDef name:save_summaries_secs arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "html_safe", - "source_code": "def html_safe(klass): if '__html__' in klass.__dict__: raise ValueError(\"can't apply @html_safe to %s because it defines __html__().\" % klass.__name__) if '__str__' not in klass.__dict__: raise ValueError(\"can't apply @html_safe to %s because it doesn't define __str__().\" % klass.__name__) klass_str = klass.__str__ klass.__str__ = lambda self: mark_safe(klass_str(self)) klass.__html__ = lambda self: str(self) return klass", - "docstring": "A decorator that defines the __html__ method. This helps non-Django templates to detect classes whose __str__ methods return SafeString.", - "type": "function", - "file_path": "django\\django\\utils\\html.py", - "ast_data": "FunctionDef name:html_safe arguments arg:klass If Compare op:In Raise raises:ValueError(\"can't apply @html_safe to %s because it defines __html__().\" % klass.__name__) If Compare op:NotIn Raise raises:ValueError(\"can't apply @html_safe to %s because it doesn't define __str__().\" % klass.__name__) Assign Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "window_frame_rows_start_end", - "source_code": "def window_frame_rows_start_end(self, start = None, end = None): if isinstance(start, int) and isinstance(end, int) and (start > end): raise ValueError('start cannot be greater than end.') if start is not None and (not isinstance(start, int)): raise ValueError(f\"start argument must be an integer, zero, or None, but got '{start}'.\") if end is not None and (not isinstance(end, int)): raise ValueError(f\"end argument must be an integer, zero, or None, but got '{end}'.\") start_ = self.window_frame_value(start) or self.UNBOUNDED_PRECEDING end_ = self.window_frame_value(end) or self.UNBOUNDED_FOLLOWING return (start_, end_)", - "docstring": "Return SQL for start and end points in an OVER clause window frame.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:window_frame_rows_start_end arguments arg:self arg:start arg:end If BoolOp Call call:isinstance Call call:isinstance Compare op:Gt Raise raises:ValueError('start cannot be greater than end.') If BoolOp Compare op:IsNot Raise raises:ValueError(f\"start argument must be an integer, zero, or None, but got '{start}'.\") If BoolOp Compare op:IsNot Raise raises:ValueError(f\"end argument must be an integer, zero, or None, but got '{end}'.\") Assign BoolOp Call call:window_frame_value Assign BoolOp Call call:window_frame_value Return return:yes" - }, - { - "library": "tensorflow", - "name": "populate_deserializable_objects", - "source_code": "def populate_deserializable_objects(): global LOCAL if not hasattr(LOCAL, 'ALL_OBJECTS'): LOCAL.ALL_OBJECTS = {} LOCAL.GENERATED_WITH_V2 = None if LOCAL.ALL_OBJECTS and LOCAL.GENERATED_WITH_V2 = = tf2.enabled(): return LOCAL.ALL_OBJECTS = 
{} LOCAL.GENERATED_WITH_V2 = tf2.enabled() base_cls = base_layer.Layer generic_utils.populate_dict_with_module_objects(LOCAL.ALL_OBJECTS, ALL_MODULES, obj_filter = lambda x: inspect.isclass(x) and issubclass(x, base_cls)) if tf2.enabled(): generic_utils.populate_dict_with_module_objects(LOCAL.ALL_OBJECTS, ALL_V2_MODULES, obj_filter = lambda x: inspect.isclass(x) and issubclass(x, base_cls)) from tensorflow.python.keras import models LOCAL.ALL_OBJECTS['Input'] = input_layer.Input LOCAL.ALL_OBJECTS['InputSpec'] = input_spec.InputSpec LOCAL.ALL_OBJECTS['Functional'] = models.Functional LOCAL.ALL_OBJECTS['Model'] = models.Model LOCAL.ALL_OBJECTS['Sequential'] = models.Sequential LOCAL.ALL_OBJECTS['add'] = merge.add LOCAL.ALL_OBJECTS['subtract'] = merge.subtract LOCAL.ALL_OBJECTS['multiply'] = merge.multiply LOCAL.ALL_OBJECTS['average'] = merge.average LOCAL.ALL_OBJECTS['maximum'] = merge.maximum LOCAL.ALL_OBJECTS['minimum'] = merge.minimum LOCAL.ALL_OBJECTS['concatenate'] = merge.concatenate LOCAL.ALL_OBJECTS['dot'] = merge.dot", - "docstring": "Populates dict ALL_OBJECTS with every built-in layer.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\serialization.py", - "ast_data": "FunctionDef name:populate_deserializable_objects arguments If Assign Assign If BoolOp Compare op:Eq Return return:no Assign Assign Call call:enabled Assign If Call call:enabled Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "unimplemented_v2", - "source_code": "def unimplemented_v2(gb_type: str, context: str, explanation: str, hints: list[str], *, from_exc: Any = _NOTHING, log_warning: bool = False) -> NoReturn: msg = format_graph_break_message(gb_type, context, explanation, hints) if log_warning: log.warning(msg) if from_exc is not _NOTHING: raise Unsupported(msg) from from_exc raise Unsupported(msg)", - "docstring": "Called within dynamo to cause a graph break. Args: gb_type: Context-free graph break type. It should be a short string without any information specific to the tracing context (i.e. no dynamically-generated strings) context: Developer context for the graph break. It can contain tracing context/dynamic strings. explanation: User-facing context-dependent explanation for the graph break. Can be dynamic. hints: List of user-facing hints for the graph break.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\exc.py", - "ast_data": "FunctionDef name:unimplemented_v2 arguments arg:gb_type type:str arg:context type:str arg:explanation type:str arg:hints type:list[str] Assign Call call:format_graph_break_message If If Compare op:IsNot Raise raises:Unsupported(msg) Raise raises:Unsupported(msg)" - }, - { - "library": "pandas", - "name": "decode", - "source_code": "def decode(self, encoding, errors: str = 'strict', dtype: str | DtypeObj | None = None): if dtype is not None and (not is_string_dtype(dtype)): raise ValueError(f'dtype must be string or object, got dtype = {dtype!r}') if dtype is None and get_option('future.infer_string'): dtype = 'str' if encoding in _cpython_optimized_decoders: f = lambda x: x.decode(encoding, errors) else: decoder = codecs.getdecoder(encoding) f = lambda x: decoder(x, errors)[0] arr = self._data.array result = arr._str_map(f) return self._wrap_result(result, dtype = dtype)", - "docstring": "Decode character string in the Series/Index using indicated encoding. Equivalent to :meth: in python2 and :meth: in python3. 
Parameters ---------- encoding : str Specifies the encoding to be used. errors : str, optional Specifies the error handling scheme. Possible values are those supported by :meth:. dtype : str or dtype, optional The dtype of the result. When not ``. .. versionadded:: 2.3.0 Returns ------- Series or Index A Series or Index with decoded strings. See Also -------- Series.str.encode : Encodes strings into bytes in a Series/Index. Examples -------- For Series: >>> ser = pd.Series([b\"cow\", b\"123\", b\"()\"]) >>> ser.str.decode(\"ascii\") 0 cow 1 123 2 () dtype: object", - "type": "method", - "file_path": "pandas\\pandas\\core\\strings\\accessor.py", - "ast_data": "FunctionDef name:decode arguments arg:self arg:encoding arg:errors type:str arg:dtype type:str | DtypeObj | None If BoolOp Compare op:IsNot Raise raises:ValueError(f'dtype must be string or object, got dtype={dtype!r}') If BoolOp Compare op:Is Call call:get_option Assign If Compare op:In Assign Assign Call call:getdecoder Assign Assign Assign Call call:_str_map Return return:yes" - }, - { - "library": "django", - "name": "full_clean", - "source_code": "def full_clean(self): self._errors = ErrorDict(renderer = self.renderer) if not self.is_bound: return self.cleaned_data = {} if self.empty_permitted and (not self.has_changed()): return self._clean_fields() self._clean_form() self._post_clean()", - "docstring": "Clean all of self.data and populate self._errors and self.cleaned_data.", - "type": "method", - "file_path": "django\\django\\forms\\forms.py", - "ast_data": "FunctionDef name:full_clean arguments arg:self Assign Call call:ErrorDict If Return return:no Assign If BoolOp Return return:no" - }, - { - "library": "scikit-learn", - "name": "get_feature_names_out", - "source_code": "def get_feature_names_out(self, input_features = None): check_is_fitted(self, '_n_features_out') return _generate_get_feature_names_out(self, self._n_features_out, input_features = input_features)", - "docstring": "Get output feature names for transformation. The feature names out will prefixed by the lowercased class name. For example, if the transformer outputs 3 features, then the feature names out are: . Parameters ---------- input_features : array-like of str or None, default=None Only used to validate feature names with the names seen in . Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\base.py", - "ast_data": "FunctionDef name:get_feature_names_out arguments arg:self arg:input_features Return return:yes" - }, - { - "library": "pytorch", - "name": "qualified_module_class_name", - "source_code": "@property def qualified_module_class_name(self) -> str: if self._module_class is None: return '' mod_cls = self._module_class if isinstance(mod_cls, type): mod_cls = mod_cls.__module__ + '.' + mod_cls.__qualname__ return mod_cls.replace('.', '_')", - "docstring": "Qualified name of the module class. E.g. .", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", - "ast_data": "FunctionDef name:qualified_module_class_name arguments arg:self If Compare op:Is Return return:yes Assign If Call call:isinstance Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "create_artists", - "source_code": "def create_artists(self, legend, orig_handle, xdescent, ydescent, width, height, fontsize, trans): raise NotImplementedError('Derived must override')", - "docstring": "Return the legend artists generated. 
Parameters ---------- legend : The legend for which these legend artists are being created. orig_handle : or similar The object for which these legend artists are being created. xdescent, ydescent, width, height : int The rectangle (*xdescent*, *ydescent*, *width*, *height*) that the legend artists being created should fit within. fontsize : int The fontsize in pixels. The legend artists being created should be scaled according to the given fontsize. trans : The transform that is applied to the legend artists being created. Typically from unit coordinates in the handler box to screen coordinates.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\legend_handler.py", - "ast_data": "FunctionDef name:create_artists arguments arg:self arg:legend arg:orig_handle arg:xdescent arg:ydescent arg:width arg:height arg:fontsize arg:trans Raise raises:NotImplementedError('Derived must override')" - }, - { - "library": "kornia", - "name": "RandomGaussianNoise", - "source_code": "class RandomGaussianNoise(IntensityAugmentationBase2D): def __init__(self, mean: float = 0.0, std: float = 1.0, same_on_batch: bool = False, p: float = 0.5, keepdim: bool = False) -> None: super().__init__(p = p, same_on_batch = same_on_batch, p_batch = 1.0, keepdim = keepdim) self.flags = {'mean': mean, 'std': std} def generate_parameters(self, shape: Tuple[int, ...]) -> Dict[str, Tensor]: return {} def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor] = None) -> Tensor: if 'gaussian_noise' in params: gaussian_noise = params['gaussian_noise'] else: gaussian_noise = _randn_like(input, mean = flags['mean'], std = flags['std']) self._params['gaussian_noise'] = gaussian_noise return input + gaussian_noise", - "docstring": "Add gaussian noise to a batch of multi-dimensional images. .. image:: _static/img/RandomGaussianNoise.png Args: mean: The mean of the gaussian distribution. std: The standard deviation of the gaussian distribution. same_on_batch: apply the same transformation across the batch. p: probability of applying the transformation. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Examples: >>> rng = torch.manual_seed(0) >>> img = torch.ones(1, 1, 2, 2) >>> RandomGaussianNoise(mean=0., std=1., p=1.)(img) tensor([[[[ 2.5410, 0.7066], [-1.1788, 1.5684]]]]) To apply the exact augmenation again, you may take the advantage of the previous parameter state: >>> input = torch.randn(1, 3, 32, 32) >>> aug = RandomGaussianNoise(mean=0., std=1., p=1.) >>> (aug(input) == aug(input, params=aug._params)).all() tensor(True)", - "type": "class", - "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\gaussian_noise.py", - "ast_data": "ClassDef name:RandomGaussianNoise FunctionDef name:__init__ arguments arg:self arg:mean type:float arg:std type:float arg:same_on_batch type:bool arg:p type:float arg:keepdim type:bool Assign FunctionDef name:generate_parameters arguments arg:self arg:shape type:Tuple[int, ...] 
Return return:yes FunctionDef name:apply_transform arguments arg:self arg:input type:Tensor arg:params type:Dict[str, Tensor] arg:flags type:Dict[str, Any] arg:transform type:Optional[Tensor] If Compare op:In Assign Assign Call call:_randn_like Assign Return return:yes" - }, - { - "library": "kornia", - "name": "transform_boxes_", - "source_code": "def transform_boxes_(self, M: torch.Tensor) -> Boxes3D: return self.transform_boxes(M, inplace = True)", - "docstring": "Inplace version of :func:.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\boxes.py", - "ast_data": "FunctionDef name:transform_boxes_ arguments arg:self arg:M type:torch.Tensor Return return:yes" - }, - { - "library": "pytorch", - "name": "atleast_2d", - "source_code": "def atleast_2d(*tensors): if has_torch_function(tensors): return handle_torch_function(atleast_2d, tensors, *tensors) if len(tensors) = = 1: tensors = tensors[0] return _VF.atleast_2d(tensors)", - "docstring": "Returns a 2-dimensional view of each input tensor with zero dimensions. Input tensors with two or more dimensions are returned as-is. Args: input (Tensor or list of Tensors) Returns: output (Tensor or tuple of Tensors) Example:: >>> x = torch.tensor(1.) >>> x tensor(1.) >>> torch.atleast_2d(x) tensor([[1.]]) >>> x = torch.arange(4).view(2, 2) >>> x tensor([[0, 1], [2, 3]]) >>> torch.atleast_2d(x) tensor([[0, 1], [2, 3]]) >>> x = torch.tensor(0.5) >>> y = torch.tensor(1.) >>> torch.atleast_2d((x, y)) (tensor([[0.5000]]), tensor([[1.]]))", - "type": "function", - "file_path": "pytorch\\torch\\functional.py", - "ast_data": "FunctionDef name:atleast_2d arguments vararg:tensors If Call call:has_torch_function Return return:yes If Compare op:Eq Assign Return return:yes" - }, - { - "library": "django", - "name": "items", - "source_code": "def items(self): for key in self: yield (key, self[key])", - "docstring": "Yield (key, value) pairs, where value is the last item in the list associated with the key.", - "type": "method", - "file_path": "django\\django\\utils\\datastructures.py", - "ast_data": "FunctionDef name:items arguments arg:self For" - }, - { - "library": "scipy", - "name": "tsem", - "source_code": "def tsem(a, limits = None, inclusive = (True, True), axis = 0, ddof = 1): a = ma.asarray(a).ravel() if limits is None: n = float(a.count()) return a.std(axis = axis, ddof = ddof) / ma.sqrt(n) am = trima(a.ravel(), limits, inclusive) sd = np.sqrt(am.var(axis = axis, ddof = ddof)) return sd / np.sqrt(am.count())", - "docstring": "Compute the trimmed standard error of the mean. This function finds the standard error of the mean for given values, ignoring values outside the given . Parameters ---------- a : array_like array of values limits : None or (lower limit, upper limit), optional Values in the input array less than the lower limit or greater than the upper limit will be ignored. When limits is None, then all values are used. Either of the limit values in the tuple can also be None representing a half-open interval. The default value is None. inclusive : (bool, bool), optional A tuple consisting of the (lower flag, upper flag). These flags determine whether values exactly equal to the lower or upper limits are included. The default value is (True, True). axis : int or None, optional Axis along which to operate. If None, compute over the whole array. Default is zero. ddof : int, optional Delta degrees of freedom. Default is 1. 
Returns ------- tsem : float Notes ----- For more details on , see .", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", - "ast_data": "FunctionDef name:tsem arguments arg:a arg:limits arg:inclusive arg:axis arg:ddof Assign Call call:ravel If Compare op:Is Assign Call call:float Return return:yes Assign Call call:trima Assign Call call:sqrt Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, canvas, window = None, *, pack_toolbar = True): if window is None: window = canvas.get_tk_widget().master tk.Frame.__init__(self, master = window, borderwidth = 2, width = int(canvas.figure.bbox.width), height = 50) self._buttons = {} for text, tooltip_text, image_file, callback in self.toolitems: if text is None: self._Spacer() else: self._buttons[text] = button = self._Button(text, str(cbook._get_data_path(f'images/{image_file}.png')), toggle = callback in ['zoom', 'pan'], command = getattr(self, callback)) if tooltip_text is not None: add_tooltip(button, tooltip_text) self._label_font = tkinter.font.Font(root = window, size = 10) label = tk.Label(master = self, font = self._label_font, text = '\\xa0\\n\\xa0') label.pack(side = tk.RIGHT) self.message = tk.StringVar(master = self) self._message_label = tk.Label(master = self, font = self._label_font, textvariable = self.message, justify = tk.RIGHT) self._message_label.pack(side = tk.RIGHT) NavigationToolbar2.__init__(self, canvas) if pack_toolbar: self.pack(side = tk.BOTTOM, fill = tk.X)", - "docstring": "Parameters ---------- canvas : The figure canvas on which to operate. window : tk.Window The tk.Window which owns this toolbar. pack_toolbar : bool, default: True If True, add the toolbar to the parent's pack manager's packing list during initialization with ``.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\_backend_tk.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:canvas arg:window If Compare op:Is Assign Assign For If Compare op:Is Assign Call call:_Button If Compare op:IsNot Assign Call call:Font Assign Call call:Label Assign Call call:StringVar Assign Call call:Label If" - }, - { - "library": "tensorflow", - "name": "constraint", - "source_code": "@property def constraint(self): return self._constraint", - "docstring": "Returns the constraint function associated with this variable. Returns: The constraint function that was passed to the variable constructor. Can be if no constraint was passed.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:constraint arguments arg:self Return return:yes" - }, - { - "library": "authlib", - "name": "get_jwt_config", - "source_code": "def get_jwt_config(self): raise NotImplementedError()", - "docstring": "Get the JWT configuration for OpenIDImplicitGrant. The JWT configuration will be used to generate ``. 
Developers MUST implement this method in subclass, e.g.:: def get_jwt_config(self): return { \"key\": read_private_key_file(key_path), \"alg\": \"RS256\", \"iss\": \"issuer-identity\", \"exp\": 3600, } :return: dict", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\core\\grants\\implicit.py", - "ast_data": "FunctionDef name:get_jwt_config arguments arg:self Raise raises:NotImplementedError()" - }, - { - "library": "matplotlib", - "name": "set_fontsize", - "source_code": "def set_fontsize(self, fontsize): self._fontproperties.set_size(fontsize) self.stale = True", - "docstring": "Set the font size. Parameters ---------- fontsize : float or {'xx-small', 'x-small', 'small', 'medium', 'large', 'x-large', 'xx-large'} If a float, the fontsize in points. The string values denote sizes relative to the default font size. See Also -------- .font_manager.FontProperties.set_size", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:set_fontsize arguments arg:self arg:fontsize Assign" - }, - { - "library": "pytorch", - "name": "get_swap_module_params_on_conversion", - "source_code": "def get_swap_module_params_on_conversion() -> bool: return _swap_module_params_on_conversion", - "docstring": "Returns whether to use :func: instead of setting .data to change the existing parameters in-place when converting an `~torch.__future__.set_swap_module_params_on_conversion` for more information.", - "type": "function", - "file_path": "pytorch\\torch\\__future__.py", - "ast_data": "FunctionDef name:get_swap_module_params_on_conversion arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "generate_all_int_dyn_dim_possibilities", - "source_code": "def generate_all_int_dyn_dim_possibilities(my_list: list[DVar]): eq_possibilities = [BinConstraintD(my_list[i], Dyn, op_eq) for i in range(len(my_list))] neq_possibilities = [BinConstraintD(my_list[i], Dyn, op_neq) for i in range(len(my_list))] d_possibilities = [list(i) for i in zip(eq_possibilities, neq_possibilities)] all_possibilities = list(itertools.product(*d_possibilities)) return all_possibilities", - "docstring": "Generate all possibilities of being equal or not equal to dyn for my_list Args: my_list: List of tensor dimensions Returns: A list of a list of constraints. Each list of constraints corresponds to one possibility about the values of the dimension variables", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:generate_all_int_dyn_dim_possibilities arguments arg:my_list type:list[DVar] Assign Assign Assign Assign Call call:list Return return:yes" - }, - { - "library": "authlib", - "name": "register_client_auth_method", - "source_code": "def register_client_auth_method(self, auth): if isinstance(auth, tuple): self._auth_methods[auth[0]] = auth[1] else: self._auth_methods[auth.name] = auth", - "docstring": "Extend client authenticate for token endpoint. 
:param auth: an instance to sign the request", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\client.py", - "ast_data": "FunctionDef name:register_client_auth_method arguments arg:self arg:auth If Call call:isinstance Assign Assign" - }, - { - "library": "matplotlib", - "name": "reset_position", - "source_code": "def reset_position(self): for ax in self._twinned_axes.get_siblings(self): pos = ax.get_position(original = True) ax.set_position(pos, which = 'active')", - "docstring": "Reset the active position to the original position. This undoes changes to the active position (as defined in ) which may have been performed to satisfy fixed-aspect constraints.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:reset_position arguments arg:self For Call call:get_siblings Assign Call call:get_position" - }, - { - "library": "pytorch", - "name": "get_default_mmap_options", - "source_code": "def get_default_mmap_options() -> Optional[int]: from torch.utils.serialization import config return config.load.mmap_flags", - "docstring": "Get default mmap options for :func: with ``. Returns: default_mmap_options: int", - "type": "function", - "file_path": "pytorch\\torch\\serialization.py", - "ast_data": "FunctionDef name:get_default_mmap_options arguments Return return:yes" - }, - { - "library": "matplotlib", - "name": "open_file_cm", - "source_code": "def open_file_cm(path_or_file, mode = 'r', encoding = None): fh, opened = to_filehandle(path_or_file, mode, True, encoding) return fh if opened else contextlib.nullcontext(fh)", - "docstring": "Pass through file objects and context-manage path-likes.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", - "ast_data": "FunctionDef name:open_file_cm arguments arg:path_or_file arg:mode arg:encoding Assign Call call:to_filehandle Return return:yes" - }, - { - "library": "scipy", - "name": "reconstruct_skel_matrix", - "source_code": "def reconstruct_skel_matrix(A, k, idx): return A[:, idx[: k]]", - "docstring": "Reconstruct skeleton matrix from ID. The skeleton matrix can be reconstructed from the original matrix and its ID rank and indices and , respectively, as:: B = A[:,idx[:k]] The original matrix can then be reconstructed via:: numpy.hstack([B, numpy.dot(B, proj)])[:,numpy.argsort(idx)] See also :func: and :func:. .. This function automatically detects the matrix data type and calls the appropriate backend. For details, see :func: and :func:. Parameters ---------- A : :class: Original matrix. k : int Rank of ID. idx : :class: Column index array. 
Returns ------- :class: Skeleton matrix.", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\interpolative.py", - "ast_data": "FunctionDef name:reconstruct_skel_matrix arguments arg:A arg:k arg:idx Return return:yes" - }, - { - "library": "pygame", - "name": "set_underline", - "source_code": "def set_underline(self, value): self.underline = bool(value)", - "docstring": "set_underline(bool) -> None control if text is rendered with an underline", - "type": "method", - "file_path": "pygame\\src_py\\ftfont.py", - "ast_data": "FunctionDef name:set_underline arguments arg:self arg:value Assign Call call:bool" - }, - { - "library": "pytorch", - "name": "reset_modules", - "source_code": "def reset_modules(nodes: list[fx.Node], modules: dict[str, nn.Module], old_modules: dict[nn.Module, nn.Module]): for node in nodes: if node.op = = 'call_module': assert isinstance(node.target, str) cur_module = modules[node.target] if cur_module in old_modules: replace_node_module(node, modules, old_modules[cur_module])", - "docstring": "Maps each module that's been changed with back to its original.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\optimization.py", - "ast_data": "FunctionDef name:reset_modules arguments arg:nodes type:list[fx.Node] arg:modules type:dict[str, nn.Module] arg:old_modules type:dict[nn.Module, nn.Module] For If Compare op:Eq Assign If Compare op:In" - }, - { - "library": "tensorflow", - "name": "values", - "source_code": "@property def values(self): return self._values", - "docstring": "The non-zero values in the represented dense tensor. Returns: A 1-D Tensor of any data type.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\sparse_tensor.py", - "ast_data": "FunctionDef name:values arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "affine", - "source_code": "def affine(tensor: Tensor, matrix: Tensor, mode: str = 'bilinear', padding_mode: str = 'zeros', align_corners: bool = True) -> Tensor: is_unbatched: bool = tensor.ndimension() = = 3 if is_unbatched: tensor = torch.unsqueeze(tensor, dim = 0) matrix = matrix.expand(tensor.shape[0], -1, -1) height: int = tensor.shape[-2] width: int = tensor.shape[-1] warped: Tensor = warp_affine(tensor, matrix, (height, width), mode, padding_mode, align_corners) if is_unbatched: warped = torch.squeeze(warped, dim = 0) return warped", - "docstring": "Apply an affine transformation to the image. .. image:: _static/img/warp_affine.png Args: tensor: The image tensor to be warped in shapes of :math:, :math: and :math:. matrix: The 2x3 affine transformation matrix. mode: interpolation mode to calculate output values ``. align_corners: interpolation flag. Returns: The warped image with the same shape as the input. 
Example: >>> img = torch.rand(1, 2, 3, 5) >>> aff = torch.eye(2, 3)[None] >>> out = affine(img, aff) >>> print(out.shape) torch.Size([1, 2, 3, 5])", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", - "ast_data": "FunctionDef name:affine arguments arg:tensor type:Tensor arg:matrix type:Tensor arg:mode type:str arg:padding_mode type:str arg:align_corners type:bool If Assign Call call:unsqueeze Assign Call call:expand If Assign Call call:squeeze Return return:yes" - }, - { - "library": "pytorch", - "name": "from_dict", - "source_code": "@classmethod def from_dict(cls, convert_custom_config_dict: dict[str, Any]) -> ConvertCustomConfig: conf = cls() for quant_type_name, custom_module_mapping in convert_custom_config_dict.get(OBSERVED_TO_QUANTIZED_DICT_KEY, {}).items(): quant_type = _quant_type_from_str(quant_type_name) for observed_class, quantized_class in custom_module_mapping.items(): conf.set_observed_to_quantized_mapping(observed_class, quantized_class, quant_type) conf.set_preserved_attributes(convert_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, [])) return conf", - "docstring": "Create a `` This function is primarily for backward compatibility and may be removed in the future.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", - "ast_data": "FunctionDef name:from_dict arguments arg:cls arg:convert_custom_config_dict type:dict[str, Any] Assign Call call:cls For Call call:items Assign Call call:_quant_type_from_str For Call call:items Return return:yes" - }, - { - "library": "pytorch", - "name": "register_rendezvous_handler", - "source_code": "def register_rendezvous_handler(scheme, handler): global _rendezvous_handlers if scheme in _rendezvous_handlers: raise RuntimeError(f'Rendezvous handler for {scheme}: // already registered') _rendezvous_handlers[scheme] = handler", - "docstring": "Register a new rendezvous handler. Before we can run collective algorithms, participating processes need to find each other and exchange information to be able to communicate. We call this process rendezvous. The outcome of the rendezvous process is a triplet containing a shared key/value store, the rank of the process, and the total number of participating processes. If none of the bundled rendezvous methods apply to your execution environment you can opt to register your own rendezvous handler. Pick a unique name and use the URL scheme to identify it when calling the function. Args: scheme (str): URL scheme to identify your rendezvous handler. handler (function): Handler that is invoked when the function is called with a URL that uses the corresponding scheme. 
It must be a generator function that yields the triplet.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\rendezvous.py", - "ast_data": "FunctionDef name:register_rendezvous_handler arguments arg:scheme arg:handler If Compare op:In Raise raises:RuntimeError(f'Rendezvous handler for {scheme}:// already registered') Assign" - }, - { - "library": "salmon", - "name": "from_file", - "source_code": "def from_file(fileobj): try: msg = email.message_from_file(fileobj) except TypeError: fileobj.seek(0) msg = email.message_from_binary_file(fileobj) return from_message(msg)", - "docstring": "Reads an email and cleans it up to make a MailBase.", - "type": "function", - "file_path": "salmon\\salmon\\encoding.py", - "ast_data": "FunctionDef name:from_file arguments arg:fileobj Try Assign Call call:message_from_file ExceptHandler Assign Call call:message_from_binary_file Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict", - "source_code": "def predict(self, X): check_is_fitted(self) decision_func = self.decision_function(X) is_inlier = np.ones_like(decision_func, dtype = int) is_inlier[decision_func < 0] = -1 return is_inlier", - "docstring": "Predict if a particular sample is an outlier or not. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The input samples. Internally, it will be converted to ``. This is because, predict may actually be faster without parallelization for a small number of samples, such as for 1000 samples or less. The user can set the number of jobs in the joblib context to control the number of parallel jobs. .. code-block:: python from joblib import parallel_backend # Note, we use threading here as the predict method is not CPU bound. with parallel_backend(\"threading\", n_jobs=4): model.predict(X)", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_iforest.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X Assign Call call:decision_function Assign Call call:ones_like Assign Return return:yes" - }, - { - "library": "pandas", - "name": "num_chunks", - "source_code": "def num_chunks(self) -> int: return 1", - "docstring": "Return the number of chunks the column consists of.", - "type": "method", - "file_path": "pandas\\pandas\\core\\interchange\\column.py", - "ast_data": "FunctionDef name:num_chunks arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "LazyInstanceNorm3d", - "source_code": "class LazyInstanceNorm3d(_LazyNormBase, _InstanceNorm): cls_to_become = InstanceNorm3d def _get_no_batch_dim(self): return 4 def _check_input_dim(self, input): if input.dim() not in (4, 5): raise ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')", - "docstring": "A :class: module with lazy initialization of the `InstanceNorm3dweightbiasrunning_meanrunning_vartorch.nn.modules.lazy.LazyModuleMixinC(N, C, D, H, W)(C, D, H, W)(N, C, D, H, W)(C, D, H, W)(N, C, D, H, W)(C, D, H, W)` (same shape as input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\instancenorm.py", - "ast_data": "ClassDef name:LazyInstanceNorm3d Assign FunctionDef name:_get_no_batch_dim arguments arg:self Return return:yes FunctionDef name:_check_input_dim arguments arg:self arg:input If Compare op:NotIn Raise raises:ValueError(f'expected 4D or 5D input (got {input.dim()}D input)')" - }, - { - "library": "tensorflow", - "name": "watched_variables", - "source_code": "def watched_variables(self): return 
pywrap_tfe.TFE_Py_VariableWatcherWatchedVariables(self._variable_watcher)", - "docstring": "Returns a tuple of variables accessed under this scope.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py", - "ast_data": "FunctionDef name:watched_variables arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "recover_original_precision_folded_computation_ops", - "source_code": "def recover_original_precision_folded_computation_ops(gm): graph = gm.graph for target, idx in ((aten.convolution.default, (1, 2)), (aten.addmm.default, (0, 2)), (aten.mm.default, (1,))): for node in graph.find_nodes(op = 'call_function', target = target): orig_dtype = node.meta.get('_allow_mixed_dtype_folding', None) if orig_dtype is None: continue with graph.inserting_before(node): for i in idx: old_input = node.args[i] if old_input is None: continue new_input = graph.create_node('call_function', prims.convert_element_type.default, (old_input, orig_dtype)) node.replace_input_with(old_input, new_input)", - "docstring": "After binary folding conv/linear weights and biases to a higher dtype, recover the original precision they were in.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\fx_passes\\binary_folding.py", - "ast_data": "FunctionDef name:recover_original_precision_folded_computation_ops arguments arg:gm Assign For For Call call:find_nodes Assign Call call:get If Compare op:Is With For Assign If Compare op:Is Assign Call call:create_node" - }, - { - "library": "tensorflow", - "name": "gradient_tensor", - "source_code": "def gradient_tensor(self, x_tensor): x_tensor_name = self._get_tensor_name(x_tensor) if x_tensor_name not in self._gradient_tensors: raise LookupError('This GradientsDebugger has not received any gradient tensor for x-tensor %s' % x_tensor_name) return self._gradient_tensors[x_tensor_name]", - "docstring": "Get the gradient tensor of an x-tensor. Args: x_tensor: (, or ) The x-tensor object or its name. x-tensor refers to the independent , i.e., the tensor on the denominator of the differentiation. Returns: If found, the gradient tensor. Raises: TypeError: If is not a , or . LookupError: If the has not been registered with a gradient tensor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py", - "ast_data": "FunctionDef name:gradient_tensor arguments arg:self arg:x_tensor Assign Call call:_get_tensor_name If Compare op:NotIn Raise raises:LookupError('This GradientsDebugger has not received any gradient tensor for x-tensor %s' % x_tensor_name) Return return:yes" - }, - { - "library": "tensorflow", - "name": "scatter_sub", - "source_code": "def scatter_sub(self, sparse_delta, use_locking = False, name = None): if not isinstance(sparse_delta, indexed_slices.IndexedSlices): raise TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}') return self._lazy_read(gen_resource_variable_ops.resource_scatter_sub(self.handle, sparse_delta.indices, ops.convert_to_tensor(sparse_delta.values, self.dtype), name = name))", - "docstring": "Subtracts from this variable. Args: sparse_delta: to be subtracted from this variable. use_locking: If , use locking during the operation. name: the name of the operation. Returns: The updated variable. 
Raises: TypeError: if is not an .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:scatter_sub arguments arg:self arg:sparse_delta arg:use_locking arg:name If Raise raises:TypeError(f'Argument `sparse_delta` must be a `tf.IndexedSlices`. Received arg: {sparse_delta}') Return return:yes" - }, - { - "library": "pytorch", - "name": "torch_key", - "source_code": "@torch_key_cache def torch_key() -> bytes: with dynamo_timed('inductor_codecache_torch_key', log_pt2_compile_event = False): if not config.is_fbcode(): def get_code_hash(root: str) -> bytes: extra_files = ('codegen/aoti_runtime/interface.cpp', 'script.ld') inductor_root = os.path.dirname(__file__) extra_files = [os.path.join(inductor_root, x) for x in extra_files] hasher = hashlib.sha256() hasher.update(torch.__version__.encode('utf-8')) build_code_hash([root], '', hasher) for path in extra_files: if os.path.exists(path): with open(path, 'rb') as f: hasher.update(f.read()) return hasher.digest() return get_code_hash(_TORCH_PATH) from libfb.py import parutil return parutil.get_file_contents('torch/src_hash.txt').rstrip().encode('ascii')", - "docstring": "Compute a key that contains relevant information about torch source files", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\codecache.py", - "ast_data": "FunctionDef name:torch_key arguments With If FunctionDef name:get_code_hash arguments arg:root type:str Assign Assign Call call:dirname Assign Assign Call call:sha256 For If Call call:exists With Return return:yes Return return:yes Return return:yes" - }, - { - "library": "sphinx", - "name": "toc_metadata", - "source_code": "def toc_metadata(self, level: int, navpoints: list[NavPoint]) -> dict[str, Any]: metadata: dict[str, Any] = {'uid': self.config.epub_uid, 'title': html.escape(self.config.epub_title), 'level': level, 'navpoints': navpoints} return metadata", - "docstring": "Create a dictionary with all metadata for the toc.ncx file properly escaped.", - "type": "method", - "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", - "ast_data": "FunctionDef name:toc_metadata arguments arg:self arg:level type:int arg:navpoints type:list[NavPoint] Return return:yes" - }, - { - "library": "matplotlib", - "name": "num2timedelta", - "source_code": "def num2timedelta(x): return _ordinalf_to_timedelta_np_vectorized(x).tolist()", - "docstring": "Convert number of days to a object. If *x* is a sequence, a sequence of objects will be returned. Parameters ---------- x : float, sequence of floats Number of days. The fraction part represents hours, minutes, seconds. Returns ------- or list[]", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\dates.py", - "ast_data": "FunctionDef name:num2timedelta arguments arg:x Return return:yes" - }, - { - "library": "tensorflow", - "name": "array", - "source_code": "@tf_export.tf_export('experimental.numpy.array', v1 = []) @np_utils.np_doc_only('array') def array(val, dtype = None, copy = True, ndmin = 0): if dtype: dtype = np_utils.result_type(dtype) return _array_internal(val, dtype, copy, ndmin)", - "docstring": "Since Tensors are immutable, a copy is made only if val is placed on a different device than the current one. Even if is False, a new Tensor may need to be built to satisfy and . 
This is used only if is an ndarray or a Tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\numpy_ops\\np_array_ops.py", - "ast_data": "FunctionDef name:array arguments arg:val arg:dtype arg:copy arg:ndmin Call call:tf_export Call call:np_doc_only If Assign Call call:result_type Return return:yes" - }, - { - "library": "tensorflow", - "name": "Conv3DTranspose", - "source_code": "class Conv3DTranspose(keras_layers.Conv3DTranspose, base.Layer): def __init__(self, filters, kernel_size, strides = (1, 1, 1), padding = 'valid', data_format = 'channels_last', activation = None, use_bias = True, kernel_initializer = None, bias_initializer = init_ops.zeros_initializer(), kernel_regularizer = None, bias_regularizer = None, activity_regularizer = None, kernel_constraint = None, bias_constraint = None, trainable = True, name = None, **kwargs): super(Conv3DTranspose, self).__init__(filters = filters, kernel_size = kernel_size, strides = strides, padding = padding, data_format = data_format, activation = activation, use_bias = use_bias, kernel_initializer = kernel_initializer, bias_initializer = bias_initializer, kernel_regularizer = kernel_regularizer, bias_regularizer = bias_regularizer, activity_regularizer = activity_regularizer, kernel_constraint = kernel_constraint, bias_constraint = bias_constraint, trainable = trainable, name = name, **kwargs)", - "docstring": "Transposed 3D convolution layer (sometimes called 3D Deconvolution). Args: filters: Integer, the dimensionality of the output space (i.e. the number of filters in the convolution). kernel_size: An integer or tuple/list of 3 integers, specifying the depth, height and width of the 3D convolution window. Can be a single integer to specify the same value for all spatial dimensions. strides: An integer or tuple/list of 3 integers, specifying the strides of the convolution along the depth, height and width. Can be a single integer to specify the same value for all spatial dimensions. padding: One of or (case-insensitive). means no padding. results in padding evenly to the left/right or up/down of the input such that output has the same height/width dimension as the input. data_format: A string, one of (default) or . The ordering of the dimensions in the inputs. corresponds to inputs with shape while corresponds to inputs with shape . activation: Activation function. Set it to to maintain a linear activation. use_bias: Boolean, whether the layer uses a bias. kernel_initializer: An initializer for the convolution kernel. bias_initializer: An initializer for the bias vector. If , the default initializer will be used. kernel_regularizer: Optional regularizer for the convolution kernel. bias_regularizer: Optional regularizer for the bias vector. activity_regularizer: Optional regularizer function for the output. kernel_constraint: Optional projection function to be applied to the kernel after being updated by an (e.g. used to implement norm constraints or value constraints for layer weights). The function must take as input the unprojected variable and must return the projected variable (which must have the same shape). Constraints are not safe to use when doing asynchronous distributed training. bias_constraint: Optional projection function to be applied to the bias after being updated by an . trainable: Boolean, if also add variables to the graph collection (see ). 
name: A string, the name of the layer.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\legacy_tf_layers\\convolutional.py", - "ast_data": "ClassDef name:Conv3DTranspose FunctionDef name:__init__ arguments arg:self arg:filters arg:kernel_size arg:strides arg:padding arg:data_format arg:activation arg:use_bias arg:kernel_initializer arg:bias_initializer arg:kernel_regularizer arg:bias_regularizer arg:activity_regularizer arg:kernel_constraint arg:bias_constraint arg:trainable arg:name kwarg:kwargs" - }, - { - "library": "scipy", - "name": "pinvh", - "source_code": "@_apply_over_batch(('a', 2)) def pinvh(a, atol = None, rtol = None, lower = True, return_rank = False, check_finite = True): a = _asarray_validated(a, check_finite = check_finite) s, u = _decomp.eigh(a, lower = lower, check_finite = False, driver = 'ev') t = u.dtype.char.lower() maxS = np.max(np.abs(s), initial = 0.0) atol = 0.0 if atol is None else atol rtol = max(a.shape) * np.finfo(t).eps if rtol is None else rtol if atol < 0.0 or rtol < 0.0: raise ValueError('atol and rtol values must be positive.') val = atol + maxS * rtol above_cutoff = abs(s) > val psigma_diag = 1.0 / s[above_cutoff] u = u[:, above_cutoff] B = u * psigma_diag @ u.conj().T if return_rank: return (B, len(psigma_diag)) else: return B", - "docstring": "Compute the (Moore-Penrose) pseudo-inverse of a Hermitian matrix. Calculate a generalized inverse of a complex Hermitian/real symmetric matrix using its eigenvalue decomposition and including all eigenvalues with 'large' absolute value. Parameters ---------- a : (N, N) array_like Real symmetric or complex hermetian matrix to be pseudo-inverted atol : float, optional Absolute threshold term, default value is 0. .. versionadded:: 1.7.0 rtol : float, optional Relative threshold term, default value is `aareturn_rankpinv`. >>> import numpy as np >>> from scipy.linalg import pinvh >>> rng = np.random.default_rng() >>> a = rng.standard_normal((9, 6)) >>> a = np.dot(a, a.T) >>> B = pinvh(a) >>> np.allclose(a, a @ B @ a) True >>> np.allclose(B, B @ a @ B) True", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_basic.py", - "ast_data": "FunctionDef name:pinvh arguments arg:a arg:atol arg:rtol arg:lower arg:return_rank arg:check_finite Call call:_apply_over_batch Assign Call call:_asarray_validated Assign Call call:eigh Assign Call call:lower Assign Call call:max Assign Assign If BoolOp Compare op:Lt Compare op:Lt Raise raises:ValueError('atol and rtol values must be positive.') Assign Assign Compare op:Gt Assign Assign Assign If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "write_outputs", - "source_code": "def write_outputs(self, variable_name: str, filename: str | Path) -> None: content = '\\n'.join(('set(', variable_name, *(f' \"{file.as_posix()}\"' for file in sorted(self.files)), ')')) self._write_if_changed(filename, content)", - "docstring": "Write a file containing the list of all outputs which are generated by this script.", - "type": "method", - "file_path": "pytorch\\torchgen\\utils.py", - "ast_data": "FunctionDef name:write_outputs arguments arg:self arg:variable_name type:str arg:filename type:str | Path Assign Call call:join" - }, - { - "library": "scikit-learn", - "name": "trace_dot", - "source_code": "def trace_dot(X, Y): return np.dot(X.ravel(), Y.ravel())", - "docstring": "Trace of np.dot(X, Y.T). Parameters ---------- X : array-like First matrix. 
Y : array-like Second matrix.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\decomposition\\_nmf.py", - "ast_data": "FunctionDef name:trace_dot arguments arg:X arg:Y Return return:yes" - }, - { - "library": "coconut", - "name": "defer", - "source_code": "def defer(item): return add_action(item, DeferredNode)", - "docstring": "Defers evaluation of the given item. Only does any actual deferring if USE_COMPUTATION_GRAPH is True.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:defer arguments arg:item Return return:yes" - }, - { - "library": "tensorflow", - "name": "uses_star_args_or_kwargs_in_call", - "source_code": "def uses_star_args_or_kwargs_in_call(node): return uses_star_args_in_call(node) or uses_star_kwargs_in_call(node)", - "docstring": "Check if an ast.Call node uses arbitrary-length *args or **kwargs. This function works with the AST call node format of Python3.5+ as well as the different AST format of earlier versions of Python. Args: node: The ast.Call node to check arg values for. Returns: True if the node uses starred variadic positional args or keyword args. False if it does not.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\ast_edits.py", - "ast_data": "FunctionDef name:uses_star_args_or_kwargs_in_call arguments arg:node Return return:yes" - }, - { - "library": "matplotlib", - "name": "start_event_loop", - "source_code": "def start_event_loop(self, timeout = 0): if timeout < = 0: timeout = np.inf timestep = 0.01 counter = 0 self._looping = True while self._looping and counter * timestep < timeout: self.flush_events() time.sleep(timestep) counter + = 1", - "docstring": "Start a blocking event loop. Such an event loop is used by interactive functions, such as and , to wait for events. The event loop blocks until a callback function triggers , or *timeout* is reached. If *timeout* is 0 or negative, never timeout. Only interactive backends need to reimplement this method and it relies on being properly implemented. 
Interactive backends should implement this in a more native way.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:start_event_loop arguments arg:self arg:timeout If Compare op:LtE Assign Assign Assign Assign While BoolOp Compare op:Lt" - }, - { - "library": "pytorch", - "name": "fresh_inductor_cache", - "source_code": "@contextlib.contextmanager def fresh_inductor_cache(cache_entries: Optional[dict[str, Any]] = None, dir: Optional[str] = None, delete: bool = True) -> Iterator[None]: clear_inductor_caches() inductor_cache_dir = tempfile.mkdtemp(dir = dir) try: with mock.patch.dict(os.environ, {'TORCHINDUCTOR_CACHE_DIR': inductor_cache_dir}): log.debug('Using inductor cache dir %s', inductor_cache_dir) triton_cache_dir = os.path.join(inductor_cache_dir, 'triton') with mock.patch.dict(os.environ, {'TRITON_CACHE_DIR': triton_cache_dir}): yield if isinstance(cache_entries, dict): assert len(cache_entries) = = 0, 'expected empty cache_entries dict' if os.path.exists(triton_cache_dir): files = os.listdir(triton_cache_dir) cache_entries.update({f: os.path.getsize(os.path.join(triton_cache_dir, f)) for f in files if '.lock' not in f}) if delete: if is_windows() and torch.xpu.is_available(): unload_xpu_triton_pyds() shutil.rmtree(inductor_cache_dir, onerror = lambda func, path, exc_info: log.warning('Failed to remove temporary cache dir at %s', inductor_cache_dir, exc_info = exc_info)) except Exception: log.warning('on error, temporary cache dir kept at %s', inductor_cache_dir) raise finally: clear_inductor_caches()", - "docstring": "Contextmanager that provides a clean tmp cachedir for inductor. Optionally, pass a dict as 'cache_entries' to get a list of filenames and sizes generated with this cache instance.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\utils.py", - "ast_data": "FunctionDef name:fresh_inductor_cache arguments arg:cache_entries type:Optional[dict[str, Any]] arg:dir type:Optional[str] arg:delete type:bool Assign Call call:mkdtemp Try With Assign Call call:join With If Call call:isinstance If Call call:exists Assign Call call:listdir If If BoolOp Call call:is_windows Call call:is_available ExceptHandler Raise" - }, - { - "library": "authlib", - "name": "validate_software_id", - "source_code": "def validate_software_id(self): pass", - "docstring": "A unique identifier string (e.g., a Universally Unique Identifier (UUID)) assigned by the client developer or software publisher used by registration endpoints to identify the client software to be dynamically registered. Unlike \"client_id\", which is issued by the authorization server and SHOULD vary between instances, the \"software_id\" SHOULD remain the same for all instances of the client software. The \"software_id\" SHOULD remain the same across multiple updates or versions of the same piece of software. The value of this field is not intended to be human readable and is usually opaque to the client and authorization server.", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc7591\\claims.py", - "ast_data": "FunctionDef name:validate_software_id arguments arg:self" - }, - { - "library": "tensorflow", - "name": "flatten_with_tuple_paths", - "source_code": "def flatten_with_tuple_paths(structure, expand_composites = False): return list(zip(yield_flat_paths(structure, expand_composites = expand_composites), flatten(structure, expand_composites = expand_composites)))", - "docstring": "Returns a list of tuples. 
The order of pairs produced matches that of . This allows you to flatten a nested structure while keeping information about where in the structure each atom was located. See for more information about tuple paths. Args: structure: the nested structure to flatten. expand_composites: If true, then composite tensors such as and are expanded into their component tensors. Returns: A list of tuples. Each is a tuple of indices and/or dictionary keys that uniquely specify the path to within .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\nest.py", - "ast_data": "FunctionDef name:flatten_with_tuple_paths arguments arg:structure arg:expand_composites Return return:yes" - }, - { - "library": "pytorch", - "name": "write", - "source_code": "@classmethod def write(cls, source_code: str, dst_file_ext: str) -> tuple[str, str]: cuda_command = repr(rocm_compile_command(['dummy_input'], 'dummy_output', dst_file_ext)) key, input_path = write(source_code, cls._SOURCE_CODE_SUFFIX, extra = cuda_command) return (key, input_path)", - "docstring": "Writes source code into a file with dst_file_ext as the file extension. Returns the hash key of source code, and the path to the file.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codecache.py", - "ast_data": "FunctionDef name:write arguments arg:cls arg:source_code type:str arg:dst_file_ext type:str Assign Call call:repr Assign Call call:write Return return:yes" - }, - { - "library": "feincms", - "name": "register_response_processor", - "source_code": "@classmethod def register_response_processor(cls, fn, key = None): if cls.response_processors is None: cls.response_processors = OrderedDict() cls.response_processors[fn if key is None else key] = fn", - "docstring": "Registers the passed callable as response processor. A response processor always receives three arguments, the current object, the request and the response.", - "type": "method", - "file_path": "feincms\\feincms\\module\\mixins.py", - "ast_data": "FunctionDef name:register_response_processor arguments arg:cls arg:fn arg:key If Compare op:Is Assign Call call:OrderedDict Assign" - }, - { - "library": "pytorch", - "name": "PassBase", - "source_code": "@compatibility(is_backward_compatible = False) class PassBase(abc.ABC): def __call__(self, graph_module: GraphModule) -> Optional[PassResult]: self.requires(graph_module) res = self.call(graph_module) self.ensures(graph_module) return res @abc.abstractmethod def call(self, graph_module: GraphModule) -> Optional[PassResult]: pass def requires(self, graph_module: GraphModule) -> None: pass def ensures(self, graph_module: GraphModule) -> None: pass", - "docstring": "Base interface for implementing passes. It is required to implement the function so that we can directly pass instances of the Pass directly to the PassManager and call them as a function. 
We can directly pass an instance of a class implementing this interface into the PassManager's attribute.", - "type": "class", - "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_base.py", - "ast_data": "ClassDef name:PassBase Call call:compatibility FunctionDef name:__call__ arguments arg:self arg:graph_module type:GraphModule Assign Call call:call Return return:yes FunctionDef name:call arguments arg:self arg:graph_module type:GraphModule FunctionDef name:requires arguments arg:self arg:graph_module type:GraphModule FunctionDef name:ensures arguments arg:self arg:graph_module type:GraphModule" - }, - { - "library": "numpy", - "name": "feature_untied", - "source_code": "def feature_untied(self, names): assert not isinstance(names, str) and hasattr(names, '__iter__') final = [] for n in names: implies = self.feature_implies(n) tied = [nn for nn in final if nn in implies and n in self.feature_implies(nn)] if tied: tied = self.feature_sorted(tied + [n]) if n not in tied[1:]: continue final.remove(tied[: 1][0]) final.append(n) return final", - "docstring": "same as 'feature_ahead()' but if both features implied each other and keep the highest interest. Parameters ---------- 'names': sequence sequence of CPU feature names in uppercase. Returns ------- list of CPU features sorted as-is 'names' Examples -------- >>> self.feature_untied([\"SSE2\", \"SSE3\", \"SSE41\"]) [\"SSE2\", \"SSE3\", \"SSE41\"] # assume AVX2 and FMA3 implies each other >>> self.feature_untied([\"SSE2\", \"SSE3\", \"SSE41\", \"FMA3\", \"AVX2\"]) [\"SSE2\", \"SSE3\", \"SSE41\", \"AVX2\"]", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", - "ast_data": "FunctionDef name:feature_untied arguments arg:self arg:names Assign For Assign Call call:feature_implies Assign If Assign Call call:feature_sorted If Compare op:NotIn Return return:yes" - }, - { - "library": "django", - "name": "get_many", - "source_code": "def get_many(self, keys, version = None): d = {} for k in keys: val = self.get(k, self._missing_key, version = version) if val is not self._missing_key: d[k] = val return d", - "docstring": "Fetch a bunch of keys from the cache. For certain backends (memcached, pgsql) this can be *much* faster when fetching multiple values. Return a dict mapping each key in keys to its value. 
If the given key is missing, it will be missing from the response dict.", - "type": "method", - "file_path": "django\\django\\core\\cache\\backends\\base.py", - "ast_data": "FunctionDef name:get_many arguments arg:self arg:keys arg:version Assign For Assign Call call:get If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "gen_lists_of_dims", - "source_code": "def gen_lists_of_dims(num_tensors: int, dim_size: int, counter: int): res = [] for _ in range(num_tensors): dims, counter = gen_tensor_dims(dim_size, counter) res.append(dims) return (res, counter)", - "docstring": "Generate lists of DVar to represent tensor dimensions Args: num_tensors: the required number of tensors dim_size: the number of dimensions for each tensor counter: variable tracking Returns: A list of a list of tensor dimensions", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:gen_lists_of_dims arguments arg:num_tensors type:int arg:dim_size type:int arg:counter type:int Assign For Call call:range Assign Call call:gen_tensor_dims Return return:yes" - }, - { - "library": "kornia", - "name": "forward", - "source_code": "def forward(self, queries: Tensor, keys: Tensor, values: Tensor, q_mask: Optional[Tensor] = None, kv_mask: Optional[Tensor] = None) -> Tensor: QK = torch.einsum('nlhd, nshd->nlsh', queries, keys) if kv_mask is not None and q_mask is not None: QK.masked_fill_(~(q_mask[:, :, None, None] * kv_mask[:, None, :, None]), float('-inf')) softmax_temp = 1.0 / queries.size(3) ** 0.5 A = torch.softmax(softmax_temp * QK, dim = 2) if self.use_dropout: A = self.dropout(A) queried_values = torch.einsum('nlsh, nshd->nlhd', A, values) return queried_values.contiguous()", - "docstring": "Multi-head scaled dot-product attention, a.k.a full attention. Args: queries: [N, L, H, D] keys: [N, S, H, D] values: [N, S, H, D] q_mask: [N, L] kv_mask: [N, S] Returns: queried_values: (N, L, H, D)", - "type": "method", - "file_path": "kornia\\kornia\\feature\\loftr\\loftr_module\\linear_attention.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:queries type:Tensor arg:keys type:Tensor arg:values type:Tensor arg:q_mask type:Optional[Tensor] arg:kv_mask type:Optional[Tensor] Assign Call call:einsum If BoolOp Compare op:IsNot Compare op:IsNot Assign Assign Call call:softmax If Assign Call call:dropout Assign Call call:einsum Return return:yes" - }, - { - "library": "pygame", - "name": "array_blue", - "source_code": "def array_blue(surface): size = surface.get_size() array = numpy.empty(size, numpy.uint8) surface_to_array(array, surface, 'B') return array", - "docstring": "pygame.surfarray.array_blue(Surface): return array copy pixel blue into a 2d array Copy the pixel blue values from a Surface into a 2D array. This will work for any type of Surface format. 
This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:array_blue arguments arg:surface Assign Call call:get_size Assign Call call:empty Return return:yes" - }, - { - "library": "tensorflow", - "name": "enable_batch_variable_initialization", - "source_code": "def enable_batch_variable_initialization(): return _EXPERIMENTAL_TPU_BATCH_VARIABLE_INITIALIZATION and context.executing_eagerly() and (not save_context.in_save_context())", - "docstring": "Whether to batch variable initialization in tf.function.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", - "ast_data": "FunctionDef name:enable_batch_variable_initialization arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "first", - "source_code": "def first(seq): return next(iter(seq))", - "docstring": "The first element in a sequence >>> first(\"ABC\") 'A'", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py", - "ast_data": "FunctionDef name:first arguments arg:seq Return return:yes" - }, - { - "library": "pytorch", - "name": "new_parameter_placeholder", - "source_code": "def new_parameter_placeholder(size: tuple[int, ...], dtype: torch.dtype, device: torch.device, requires_grad: bool) -> torch.nn.Parameter: result = torch.nn.Parameter(torch.empty(size, dtype = dtype, device = device), requires_grad = requires_grad) result.untyped_storage().resize_(0) return result", - "docstring": "Create a placeholder to be passed to the above functions", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\create_parameter_op.py", - "ast_data": "FunctionDef name:new_parameter_placeholder arguments arg:size type:tuple[int, ...] arg:dtype type:torch.dtype arg:device type:torch.device arg:requires_grad type:bool Assign Call call:Parameter Return return:yes" - }, - { - "library": "pytorch", - "name": "load_state_dict", - "source_code": "@override def load_state_dict(self, state_dict: dict[str, Any]) -> None: _schedulers = state_dict.pop('_schedulers') self.__dict__.update(state_dict) state_dict['_schedulers'] = _schedulers for idx, s in enumerate(_schedulers): self._schedulers[idx].load_state_dict(s)", - "docstring": "Load the scheduler's state. Args: state_dict (dict): scheduler state. 
Should be an object returned from a call to :meth:.", - "type": "method", - "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", - "ast_data": "FunctionDef name:load_state_dict arguments arg:self arg:state_dict type:dict[str, Any] Assign Call call:pop Assign For Call call:enumerate" - }, - { - "library": "tensorflow", - "name": "from_proto", - "source_code": "@classmethod def from_proto(cls, proto: Any) -> 'Parameter': deserialized_type_constraint = serialization.deserialize(proto.type_constraint) if proto.HasField('type_constraint') else None return Parameter(proto.name, PROTO_TO_PY_ENUM[proto.kind], proto.is_optional, deserialized_type_constraint)", - "docstring": "Generate a Parameter from the proto representation.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", - "ast_data": "FunctionDef name:from_proto arguments arg:cls arg:proto type:Any Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "execute_on", - "source_code": "def execute_on(self, worker): replica_args = _select_worker_slice(worker.worker_index, self._args) replica_kwargs = _select_worker_slice(worker.worker_index, self._kwargs) e = _get_error_from_remote_values(replica_args) or _get_error_from_remote_values(replica_kwargs) if e: if not isinstance(e, ClosureInputError): e = ClosureInputError(e) raise e with ops.device(worker.device_name): with context.executor_scope(worker.executor): with coordinator_context.with_dispatch_context(worker): with metric_utils.monitored_timer('closure_execution'): output_values = self._function(*nest.map_structure(coordinator_context.maybe_get_remote_value, replica_args), **nest.map_structure(coordinator_context.maybe_get_remote_value, replica_kwargs)) self.maybe_call_with_output_remote_value(lambda r: r._set_values(output_values))", - "docstring": "Executes the closure on the given worker. Args: worker: a object.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\cluster_coordinator.py", - "ast_data": "FunctionDef name:execute_on arguments arg:self arg:worker Assign Call call:_select_worker_slice Assign Call call:_select_worker_slice Assign BoolOp Call call:_get_error_from_remote_values Call call:_get_error_from_remote_values If If Assign Call call:ClosureInputError Raise raises:e With With With With Assign Call call:_function" - }, - { - "library": "django", - "name": "cycle_key", - "source_code": "def cycle_key(self): data = self._session key = self.session_key self.create() self._session_cache = data if key: self.delete(key)", - "docstring": "Create a new session key, while retaining the current session data.", - "type": "method", - "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", - "ast_data": "FunctionDef name:cycle_key arguments arg:self Assign Assign Assign If" - }, - { - "library": "pytorch", - "name": "RendezvousSettings", - "source_code": "@dataclass(repr = False, eq = False, frozen = True) class RendezvousSettings: run_id: str min_nodes: int max_nodes: int timeout: RendezvousTimeout keep_alive_interval: timedelta keep_alive_max_attempt: int", - "docstring": "Hold the settings of the rendezvous. Attributes: run_id: The run id of the rendezvous. min_nodes: The minimum number of nodes to admit to the rendezvous. max_nodes: The maximum number of nodes to admit to the rendezvous. timeout: The timeout configuration of the rendezvous. 
keep_alive_interval: The amount of time a node waits before sending a heartbeat to keep it alive in the rendezvous. keep_alive_max_attempt: The maximum number of failed heartbeat attempts after which a node is considered dead.", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", - "ast_data": "ClassDef name:RendezvousSettings Call call:dataclass" - }, - { - "library": "scipy", - "name": "right_censored", - "source_code": "@classmethod def right_censored(cls, x, censored): x, censored = _validate_x_censored(x, censored) return cls(uncensored = x[~censored], right = x[censored])", - "docstring": "Create a instance of right-censored data. Parameters ---------- x : array_like is the array of observed data or measurements. must be a one-dimensional sequence of finite numbers. censored : array_like of bool must be a one-dimensional sequence of boolean values. If `xCensoredDataCensoredData` that represents the collection of uncensored and right-censored values. Examples -------- >>> from scipy.stats import CensoredData Two uncensored values (4 and 10) and two right-censored values (24 and 25). >>> data = CensoredData.right_censored([4, 10, 24, 25], ... [False, False, True, True]) >>> data CensoredData(uncensored=array([ 4., 10.]), left=array([], dtype=float64), right=array([24., 25.]), interval=array([], shape=(0, 2), dtype=float64)) >>> print(data) CensoredData(4 values: 2 not censored, 2 right-censored)", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_censored_data.py", - "ast_data": "FunctionDef name:right_censored arguments arg:cls arg:x arg:censored Assign Call call:_validate_x_censored Return return:yes" - }, - { - "library": "mongo", - "name": "bulk_write_result", - "source_code": "@property def bulk_write_result(self) -> Optional[BulkWriteResult]: return self._bulk_write_result", - "docstring": "The result of the bulk write operation used to update the key vault collection with one or more rewrapped data keys. If :meth: does not find any matching keys to rewrap, no bulk write operation will be executed and this field will be ``.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\encryption.py", - "ast_data": "FunctionDef name:bulk_write_result arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "Trefethen", - "source_code": "class Trefethen(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.custom_bounds = [(-5, 5), (-5, 5)] self.global_optimum = [[-0.02440307923, 0.2106124261]] self.fglob = -3.3068686474 def fun(self, x, *args): self.nfev + = 1 val = 0.25 * x[0] ** 2 + 0.25 * x[1] ** 2 val + = exp(sin(50.0 * x[0])) - sin(10 * x[0] + 10 * x[1]) val + = sin(60 * exp(x[1])) val + = sin(70 * sin(x[0])) val + = sin(sin(80 * x[1])) return val", - "docstring": "Trefethen objective function. This class defines the Trefethen [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Trefethen}}(x) = 0.25 x_{1}^{2} + 0.25 x_{2}^{2} + e^{\\sin\\left(50 x_{1}\\right)} - \\sin\\left(10 x_{1} + 10 x_{2}\\right) + \\sin\\left(60 e^{x_{2}}\\right) + \\sin\\left[70 \\sin\\left(x_{1}\\right)\\right] + \\sin\\left[\\sin\\left(80 x_{2}\\right)\\right] with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_T.py", - "ast_data": "ClassDef name:Trefethen FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, X, y = None): X = validate_data(self, X, accept_sparse = 'csr', ensure_min_samples = 2) random_state = check_random_state(self.random_state) affinity_matrix = self._get_affinity_matrix(X) self.embedding_ = _spectral_embedding(affinity_matrix, n_components = self.n_components, eigen_solver = self.eigen_solver, eigen_tol = self.eigen_tol, random_state = random_state) return self", - "docstring": "Fit the model from data in X. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. If affinity is \"precomputed\" X : {array-like, sparse matrix}, shape (n_samples, n_samples), Interpret X as precomputed adjacency graph computed from samples. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Returns the instance itself.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\manifold\\_spectral_embedding.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Call call:_fit_context Assign Call call:validate_data Assign Call call:check_random_state Assign Call call:_get_affinity_matrix Assign Call call:_spectral_embedding Return return:yes" - }, - { - "library": "tensorflow", - "name": "extract", - "source_code": "@abc.abstractmethod def extract(self, accumulator): pass", - "docstring": "Convert an accumulator into a dict of output values. Args: accumulator: The accumulator to convert. Returns: A dict of ndarrays representing the data in this accumulator.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_preprocessing_layer.py", - "ast_data": "FunctionDef name:extract arguments arg:self arg:accumulator" - }, - { - "library": "numpy", - "name": "setbufsize", - "source_code": "@set_module('numpy') def setbufsize(size): old = _get_extobj_dict()['bufsize'] extobj = _make_extobj(bufsize = size) _extobj_contextvar.set(extobj) return old", - "docstring": "Set the size of the buffer used in ufuncs. .. versionchanged:: 2.0 The scope of setting the buffer is tied to the context. Exiting a `numpy.errstate` context manager the bufsize is restored: >>> import numpy as np >>> with np.errstate(): ... np.setbufsize(4096) ... print(np.getbufsize()) ... 
8192 4096 >>> np.getbufsize() 8192", - "type": "function", - "file_path": "numpy\\numpy\\_core\\_ufunc_config.py", - "ast_data": "FunctionDef name:setbufsize arguments arg:size Call call:set_module Assign Assign Call call:_make_extobj Return return:yes" - }, - { - "library": "django", - "name": "get_db_prep_save", - "source_code": "def get_db_prep_save(self, value, connection): if hasattr(value, 'as_sql'): return value return self.get_db_prep_value(value, connection = connection, prepared = False)", - "docstring": "Return field's value prepared for saving into a database.", - "type": "method", - "file_path": "django\\django\\db\\models\\fields\\__init__.py", - "ast_data": "FunctionDef name:get_db_prep_save arguments arg:self arg:value arg:connection If Call call:hasattr Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "grid2mask", - "source_code": "def grid2mask(self, xi, yi): return (round(xi * self.x_grid2mask), round(yi * self.y_grid2mask))", - "docstring": "Return nearest space in mask-coords from given grid-coords.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py", - "ast_data": "FunctionDef name:grid2mask arguments arg:self arg:xi arg:yi Return return:yes" - }, - { - "library": "coconut", - "name": "def_in_exec", - "source_code": "def def_in_exec(name, code, needs_vars = {}, decorator = None): return '\\n_coconut_{name}_ns = {lbrace}\"_coconut\": _coconut{needs_vars}{rbrace}\\n_coconut_exec({code}, _coconut_{name}_ns)\\n{name} = {open_decorator}_coconut_{name}_ns[\"{name}\"]{close_decorator}\\n '.format(lbrace = '{', rbrace = '}', name = name, code = repr(code.strip()), needs_vars = ', ' + ', '.join((repr(var_in_def) + ': ' + var_out_def for var_in_def, var_out_def in needs_vars.items())) if needs_vars else '', open_decorator = decorator + '(' if decorator is not None else '', close_decorator = ')' if decorator is not None else '')", - "docstring": "Get code that runs code in an exec and extracts name.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\header.py", - "ast_data": "FunctionDef name:def_in_exec arguments arg:name arg:code arg:needs_vars arg:decorator Return return:yes" - }, - { - "library": "pandas", - "name": "notna", - "source_code": "def notna(self) -> npt.NDArray[np.bool_]: return ~self.isna()", - "docstring": "Inverse of isna Both missing values (-1 in .codes) and NA as a category are detected as null. Returns ------- np.ndarray[bool] of whether my values are not null See Also -------- notna : Top-level notna. notnull : Alias of notna. Categorical.isna : Boolean inverse of Categorical.notna.", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", - "ast_data": "FunctionDef name:notna arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "expand_dims", - "source_code": "def expand_dims(a: Array, /, *, axis: int | tuple[int, ...] = (0,), xp: ModuleType | None = None) -> Array: if xp is None: xp = array_namespace(a) if not isinstance(axis, tuple): axis = (axis,) ndim = a.ndim + len(axis) if axis ! = () and (min(axis) < -ndim or max(axis) > = ndim): err_msg = f'a provided axis position is out of bounds for array of dimension {a.ndim}' raise IndexError(err_msg) axis = tuple((dim % ndim for dim in axis)) if len(set(axis)) ! = len(axis): err_msg = 'Duplicate dimensions specified in `axis`.' raise ValueError(err_msg) for i in sorted(axis): a = xp.expand_dims(a, axis = i) return a", - "docstring": "Expand the shape of an array. 
Insert (a) new axis/axes that will appear at the position(s) specified by in the expanded array shape. This is `axisaa` may also be a tuple: >>> y = xpx.expand_dims(x, axis=(0, 1), xp=xp) >>> y Array([[[1, 2]]], dtype=array_api_strict.int64) >>> y = xpx.expand_dims(x, axis=(2, 0), xp=xp) >>> y Array([[[1], [2]]], dtype=array_api_strict.int64)", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py", - "ast_data": "FunctionDef name:expand_dims arguments If Compare op:Is Assign Call call:array_namespace If Assign Assign If BoolOp Compare op:NotEq BoolOp Compare op:Lt Compare op:GtE Assign Raise raises:IndexError(err_msg) Assign Call call:tuple If Compare op:NotEq Assign Raise raises:ValueError(err_msg) For Call call:sorted Assign Call call:expand_dims Return return:yes" - }, - { - "library": "pytorch", - "name": "set_custom_combo_kernel_horizontal_partition", - "source_code": "def set_custom_combo_kernel_horizontal_partition(algorithm: Callable[[list[BaseSchedulerNode], SIMDScheduling, dict[BaseSchedulerNode, TritonKernel], dict[BaseSchedulerNode, tuple[Any, Any, Any, Any]]], list[list[BaseSchedulerNode]]]) -> None: global _custom_combo_kernel_horizontal_partition_algorithm _custom_combo_kernel_horizontal_partition_algorithm = algorithm", - "docstring": "Sets the algorithm used to partition nodes into horizontal partitions. Nodes in different partitions are implemented in different combo kernels. Nodes in the same partition are likely to be implemented in the same combo kernel, but subject to subsequent restricts like CUDA limits for number of args. The algorithm should take a list of nodes and return a list of list of nodes. The default algorithm is to partition nodes based on number of block dimensions.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\codegen\\triton_combo_kernel.py", - "ast_data": "FunctionDef name:set_custom_combo_kernel_horizontal_partition arguments arg:algorithm type:Callable[[list[BaseSchedulerNode], SIMDScheduling, dict[BaseSchedulerNode, TritonKernel], dict[BaseSchedulerNode, tuple[Any, Any, Any, Any]]], list[list[BaseSchedulerNode]]] Assign" - }, - { - "library": "matplotlib", - "name": "set_connectionstyle", - "source_code": "@_docstring.interpd def set_connectionstyle(self, connectionstyle = None, **kwargs): if connectionstyle is None: return ConnectionStyle.pprint_styles() self._connector = ConnectionStyle(connectionstyle, **kwargs) if isinstance(connectionstyle, str) else connectionstyle self.stale = True", - "docstring": "Set the connection style, possibly with further attributes. Attributes from the previous connection style are not reused. Without argument (or with `~matplotlib.patches.ConnectionStyle.ConnectionStyle.ConnectionStyle` object, as documented in that class. The following connection styles are available: %(ConnectionStyle:table_and_accepts)s **kwargs Additional attributes for the connection style. See the table above for supported parameters. 
Examples -------- :: set_connectionstyle(\"Arc,armA=30,rad=10\") set_connectionstyle(\"arc\", armA=30, rad=10)", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_connectionstyle arguments arg:self arg:connectionstyle kwarg:kwargs If Compare op:Is Return return:yes Assign Assign" - }, - { - "library": "salmon", - "name": "delete_pending", - "source_code": "def delete_pending(self, pending_id): self.pending.remove(pending_id)", - "docstring": "Removes the pending message from the pending queue.", - "type": "method", - "file_path": "salmon\\salmon\\confirm.py", - "ast_data": "FunctionDef name:delete_pending arguments arg:self arg:pending_id" - }, - { - "library": "prospector", - "name": "python_packages", - "source_code": "@property def python_packages(self) -> list[Path]: return [d for d in self.directories if is_python_package(d)]", - "docstring": "Lists every directory found in the given configuration which is a python module (that is, contains an file). This method is useful for passing to tools which will do their own discovery of python files.", - "type": "method", - "file_path": "prospector\\prospector\\finder.py", - "ast_data": "FunctionDef name:python_packages arguments arg:self Return return:yes" - }, - { - "library": "authlib", - "name": "validate_userinfo_signing_alg_values_supported", - "source_code": "def validate_userinfo_signing_alg_values_supported(self): validate_array_value(self, 'userinfo_signing_alg_values_supported')", - "docstring": "OPTIONAL. JSON array containing a list of the JWS signing algorithms (alg values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT. The value none MAY be included.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", - "ast_data": "FunctionDef name:validate_userinfo_signing_alg_values_supported arguments arg:self" - }, - { - "library": "tensorflow", - "name": "merge_by_ref_with", - "source_code": "def merge_by_ref_with(self, other: 'FunctionCaptures') -> None: assert isinstance(other, FunctionCaptures) for key in other.by_ref_external: if key not in self._by_ref_external: self._by_ref_external[key] = other.by_ref_external[key] self._by_ref_tracetype[key] = other.by_ref_tracetype[key]", - "docstring": "Add by-ref captures from to if not exist.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py", - "ast_data": "FunctionDef name:merge_by_ref_with arguments arg:self arg:other type:'FunctionCaptures' For If Compare op:NotIn Assign Assign" - }, - { - "library": "pygame", - "name": "find_freetype", - "source_code": "def find_freetype(): pkg_config = DependencyProg('FREETYPE', 'FREETYPE_CONFIG', 'pkg-config freetype2', '2.0', ['freetype2'], '--modversion') if pkg_config.found: return pkg_config freetype_config = DependencyProg('FREETYPE', 'FREETYPE_CONFIG', 'freetype-config', '2.0', ['freetype'], '--ftversion') if freetype_config.found: return freetype_config return pkg_config", - "docstring": "modern freetype uses pkg-config. 
However, some older systems don't have that.", - "type": "function", - "file_path": "pygame\\buildconfig\\config_unix.py", - "ast_data": "FunctionDef name:find_freetype arguments Assign Call call:DependencyProg If Return return:yes Assign Call call:DependencyProg If Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "cat_safe", - "source_code": "def cat_safe(list_of_columns: list[npt.NDArray[np.object_]], sep: str): try: result = cat_core(list_of_columns, sep) except TypeError: for column in list_of_columns: dtype = lib.infer_dtype(column, skipna = True) if dtype not in ['string', 'empty']: raise TypeError(f'Concatenation requires list-likes containing only strings (or missing values). Offending values found in column {dtype}') from None return result", - "docstring": "Auxiliary function for :meth:. Same signature as cat_core, but handles TypeErrors in concatenation, which happen if the arrays in list_of columns have the wrong dtypes or content. Parameters ---------- list_of_columns : list of numpy arrays List of arrays to be concatenated with sep; these arrays may not contain NaNs! sep : string The separator string for concatenating the columns. Returns ------- nd.array The concatenation of list_of_columns with sep.", - "type": "function", - "file_path": "pandas\\pandas\\core\\strings\\accessor.py", - "ast_data": "FunctionDef name:cat_safe arguments arg:list_of_columns type:list[npt.NDArray[np.object_]] arg:sep type:str Try Assign Call call:cat_core ExceptHandler For Assign Call call:infer_dtype If Compare op:NotIn Raise raises:TypeError(f'Concatenation requires list-likes containing only strings (or missing values). Offending values found in column {dtype}') Return return:yes" - }, - { - "library": "matplotlib", - "name": "make_pdf_to_png_converter", - "source_code": "def make_pdf_to_png_converter(): try: mpl._get_executable_info('pdftocairo') except mpl.ExecutableNotFoundError: pass else: return lambda pdffile, pngfile, dpi: subprocess.check_output(['pdftocairo', '-singlefile', '-transp', '-png', '-r', '%d' % dpi, pdffile, os.path.splitext(pngfile)[0]], stderr = subprocess.STDOUT) try: gs_info = mpl._get_executable_info('gs') except mpl.ExecutableNotFoundError: pass else: return lambda pdffile, pngfile, dpi: subprocess.check_output([gs_info.executable, '-dQUIET', '-dSAFER', '-dBATCH', '-dNOPAUSE', '-dNOPROMPT', '-dUseCIEColor', '-dTextAlphaBits = 4', '-dGraphicsAlphaBits = 4', '-dDOINTERPOLATE', '-sDEVICE = pngalpha', '-sOutputFile = %s' % pngfile, '-r%d' % dpi, pdffile], stderr = subprocess.STDOUT) raise RuntimeError('No suitable pdf to png renderer found.')", - "docstring": "Return a function that converts a pdf file to a png file.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py", - "ast_data": "FunctionDef name:make_pdf_to_png_converter arguments Try ExceptHandler Return return:yes Try Assign Call call:_get_executable_info ExceptHandler Return return:yes Raise raises:RuntimeError('No suitable pdf to png renderer found.')" - }, - { - "library": "pytorch", - "name": "DDPCommHookType", - "source_code": "class DDPCommHookType(Enum): ALLREDUCE = partial(_ddp_comm_hook_wrapper, comm_hook = default.allreduce_hook) FP16_COMPRESS = partial(_ddp_comm_hook_wrapper, comm_hook = default.fp16_compress_hook) BF16_COMPRESS = partial(_ddp_comm_hook_wrapper, comm_hook = default.bf16_compress_hook) QUANTIZE_PER_TENSOR = partial(_ddp_comm_hook_wrapper, comm_hook = quantization.quantization_pertensor_hook) QUANTIZE_PER_CHANNEL = 
partial(_ddp_comm_hook_wrapper, comm_hook = quantization.quantization_perchannel_hook) POWER_SGD = partial(_powerSGD_comm_hook_wrapper, comm_hook = powerSGD.powerSGD_hook, matrix_approximation_rank = 1) POWER_SGD_RANK2 = partial(_powerSGD_comm_hook_wrapper, comm_hook = powerSGD.powerSGD_hook, matrix_approximation_rank = 2) BATCHED_POWER_SGD = partial(_powerSGD_comm_hook_wrapper, comm_hook = powerSGD.batched_powerSGD_hook, matrix_approximation_rank = 1) BATCHED_POWER_SGD_RANK2 = partial(_powerSGD_comm_hook_wrapper, comm_hook = powerSGD.batched_powerSGD_hook, matrix_approximation_rank = 2) NOOP = partial(_ddp_comm_hook_wrapper, comm_hook = debugging.noop_hook)", - "docstring": "Enumerate ``.", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\__init__.py", - "ast_data": "ClassDef name:DDPCommHookType Assign Call call:partial Assign Call call:partial Assign Call call:partial Assign Call call:partial Assign Call call:partial Assign Call call:partial Assign Call call:partial Assign Call call:partial Assign Call call:partial Assign Call call:partial" - }, - { - "library": "pytorch", - "name": "from_float", - "source_code": "@classmethod def from_float(cls, mod, use_precomputed_fake_quant = False): if hasattr(mod, 'weight_fake_quant'): assert type(mod) = = torch.ao.nn.qat.Embedding, 'nnq.' + cls.__name__ + '.from_float ' + 'with fake quant only works for ' + torch.ao.nn.qat.Embedding.__name__ weight_observer = mod.weight_fake_quant else: assert type(mod) = = nn.Embedding, 'nnq.' + cls.__name__ + '.from_float only works for ' + nn.Embedding.__name__ assert hasattr(mod, 'qconfig'), 'Embedding input float module must have qconfig defined' from torch.ao.quantization import float_qparams_weight_only_qconfig if mod.qconfig is not None and mod.qconfig.weight is not None: weight_observer = mod.qconfig.weight() else: weight_observer = float_qparams_weight_only_qconfig.weight() dtype = weight_observer.dtype is_float_qparams_qconfig = weight_observer.qscheme = = torch.per_channel_affine_float_qparams assert is_float_qparams_qconfig, 'Embedding quantization is only supported with float_qparams_weight_only_qconfig.' assert dtype = = torch.quint8 or dtype = = torch.quint4x2, f'The only supported dtype for nnq.Embedding is torch.quint8 and torch.quint4x2, got {dtype}' weight_observer(mod.weight) qweight = _quantize_weight(mod.weight.float(), weight_observer) qembedding = Embedding(mod.num_embeddings, mod.embedding_dim) qembedding.set_weight(qweight) return qembedding", - "docstring": "Create a quantized embedding module from a float module Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by user", - "type": "method", - "file_path": "pytorch\\torch\\ao\\nn\\quantized\\modules\\embedding_ops.py", - "ast_data": "FunctionDef name:from_float arguments arg:cls arg:mod arg:use_precomputed_fake_quant If Call call:hasattr Assign If BoolOp Compare op:IsNot Compare op:IsNot Assign Call call:weight Assign Call call:weight Assign Assign Compare op:Eq Assign Call call:_quantize_weight Assign Call call:Embedding Return return:yes" - }, - { - "library": "kornia", - "name": "fit", - "source_code": "def fit(self, X: Tensor) -> None: KORNIA_CHECK_SHAPE(X, ['N', 'D']) if self._cluster_centers is None: self._cluster_centers = self._initialise_cluster_centers(X, self.num_clusters) else: KORNIA_CHECK(X.shape[1] = = self._cluster_centers.shape[1], f'Dimensions at position 1 of X and cluster_centers do not match. {X.shape[1]} ! 
= {self._cluster_centers.shape[1]}') current_centers = self._cluster_centers previous_centers: Tensor | None = None iteration: int = 0 while True: distance: Tensor = self._pairwise_euclidean_distance(X, current_centers) cluster_assignment = distance.argmin(-1) previous_centers = current_centers.clone() for index in range(self.num_clusters): selected = torch.nonzero(cluster_assignment = = index).squeeze() selected = torch.index_select(X, 0, selected) if selected.shape[0] = = 0: selected = X[torch.randint(len(X), (1,), device = X.device)] current_centers[index] = selected.mean(dim = 0) center_shift = torch.sum(torch.sqrt(torch.sum((current_centers - previous_centers) ** 2, dim = 1))) iteration = iteration + 1 if self.tolerance is not None and center_shift ** 2 < self.tolerance: break if self.max_iterations ! = 0 and iteration > = self.max_iterations: break self._final_cluster_assignments = cluster_assignment self._final_cluster_centers = current_centers", - "docstring": "Fit iterative KMeans clustering till a threshold for shift in cluster centers or a maximum no of iterations have reached. Args: X: 2D input tensor to be clustered", - "type": "method", - "file_path": "kornia\\kornia\\contrib\\kmeans.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X type:Tensor If Compare op:Is Assign Call call:_initialise_cluster_centers Assign While Assign Call call:argmin Assign Call call:clone For Call call:range Assign Call call:squeeze Assign Call call:index_select If Compare op:Eq Assign Assign Call call:mean Assign Call call:sum Assign If BoolOp Compare op:IsNot Compare op:Lt If BoolOp Compare op:NotEq Compare op:GtE Assign Assign" - }, - { - "library": "pytorch", - "name": "run_and_read_all", - "source_code": "def run_and_read_all(run_lambda, command): rc, out, _ = run_lambda(command) if rc ! = 0: return None return out", - "docstring": "Run command using run_lambda; reads and returns entire output if rc is 0.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\collect_env.py", - "ast_data": "FunctionDef name:run_and_read_all arguments arg:run_lambda arg:command Assign Call call:run_lambda If Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "scrapy", - "name": "copy", - "source_code": "def copy(self) -> Self: return copy.deepcopy(self)", - "docstring": "Make a deep copy of current settings. This method returns a new instance of the :class: class, populated with the same values and their priorities. Modifications to the new object won't be reflected on the original settings.", - "type": "method", - "file_path": "scrapy\\scrapy\\settings\\__init__.py", - "ast_data": "FunctionDef name:copy arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_from_proto_function", - "source_code": "def get_from_proto_function(collection_name) -> Optional[Callable[[message.Message], Any]]: try: return _proto_function_registry.lookup(collection_name)[2] except LookupError: return None", - "docstring": "Returns the from_proto function for collection_name.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:get_from_proto_function arguments arg:collection_name Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "cryptography", - "name": "private_bytes_raw", - "source_code": "@abc.abstractmethod def private_bytes_raw(self) -> bytes: pass", - "docstring": "The raw bytes of the private key. 
Equivalent to private_bytes(Raw, Raw, NoEncryption()).", - "type": "method", - "file_path": "cryptography\\src\\cryptography\\hazmat\\primitives\\asymmetric\\ed448.py", - "ast_data": "FunctionDef name:private_bytes_raw arguments arg:self" - }, - { - "library": "tensorflow", - "name": "parse_node_or_tensor_name", - "source_code": "def parse_node_or_tensor_name(name): if ': ' in name and (not name.endswith(': ')): node_name = name[: name.rfind(': ')] output_slot = int(name[name.rfind(': ') + 1:]) return (node_name, output_slot) else: return (name, None)", - "docstring": "Get the node name from a string that can be node or tensor name. Args: name: An input node name (e.g., \"node_a\") or tensor name (e.g., \"node_a:0\"), as a str. Returns: 1) The node name, as a str. If the input name is a tensor name, i.e., consists of a colon, the final colon and the following output slot will be stripped. 2) If the input name is a tensor name, the output slot, as an int. If the input name is not a tensor name, None.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_graphs.py", - "ast_data": "FunctionDef name:parse_node_or_tensor_name arguments arg:name If BoolOp Compare op:In Assign Assign Call call:int Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "StringGauge", - "source_code": "class StringGauge(Metric): __slots__ = [] def __init__(self, name, description, *labels): super(StringGauge, self).__init__('StringGauge', _string_gauge_methods, len(labels), name, description, *labels) def get_cell(self, *labels): return StringGaugeCell(super(StringGauge, self).get_cell(*labels))", - "docstring": "A stateful class for updating a gauge-like string metric. This class encapsulates a set of string values (or a single value for a label-less metric). Each value is identified by a tuple of labels. The class allows the user to set each value.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\eager\\monitoring.py", - "ast_data": "ClassDef name:StringGauge Assign FunctionDef name:__init__ arguments arg:self arg:name arg:description vararg:labels FunctionDef name:get_cell arguments arg:self vararg:labels Return return:yes" - }, - { - "library": "pytorch", - "name": "transform_subclass", - "source_code": "def transform_subclass(t, callback, outer_size = None, outer_stride = None): outer_size = outer_size if outer_size is not None else t.size() outer_stride = outer_stride if outer_stride is not None else t.stride() attrs, ctx = t.__tensor_flatten__() transformed_tensors_dict = {} for attr in attrs: transformed_tensors_dict[attr] = callback(attr, getattr(t, attr)) sub = type(t).__tensor_unflatten__(transformed_tensors_dict, ctx, outer_size, outer_stride) assert sub.shape = = outer_size, f'Expected return value from {type(t)}__tensor_unflatten__() to have shape equal to {outer_size}, but got: {sub.shape}' assert sub.stride() = = outer_stride, f'Expected return value from {type(t)}__tensor_unflatten__() to have stride equal to {outer_stride}, but got: {sub.stride()}' return sub", - "docstring": "Given a traceable, wrapper tensor subclass `transform_subclass` to get a transformed tensor, and putting each transformed tensor into the fresh tensor subclass instance. Note: this function will not handle ensuring that the fresh subclass gets the same (autograd, and aliasing) metadata as the original tensor. 
This is generally handled in other subsystems like AOTAutograd.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\_python_dispatch.py", - "ast_data": "FunctionDef name:transform_subclass arguments arg:t arg:callback arg:outer_size arg:outer_stride Assign Assign Assign Call call:__tensor_flatten__ Assign For Assign Call call:callback Assign Call call:__tensor_unflatten__ Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "@_docstring.interpd def __init__(self, xy, width, height, *, angle = 0.0, rotation_point = 'xy', **kwargs): super().__init__(**kwargs) self._x0 = xy[0] self._y0 = xy[1] self._width = width self._height = height self.angle = float(angle) self.rotation_point = rotation_point self._aspect_ratio_correction = 1.0 self._convert_units()", - "docstring": "Parameters ---------- xy : (float, float) The anchor point. width : float Rectangle width. height : float Rectangle height. angle : float, default: 0 Rotation in degrees anti-clockwise about the rotation point. rotation_point : {'xy', 'center', (number, number)}, default: 'xy' If `~matplotlib.patches.Patch` properties %(Patch:kwdoc)s", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:xy arg:width arg:height kwarg:kwargs Assign Assign Assign Assign Assign Call call:float Assign Assign" - }, - { - "library": "pytorch", - "name": "ObserverBase", - "source_code": "class ObserverBase(ABC, nn.Module): def __init__(self, dtype, is_dynamic: bool = False): super().__init__() self.dtype = dtype self.is_dynamic = is_dynamic @abstractmethod def forward(self, x): pass @abstractmethod def calculate_qparams(self, **kwargs): pass with_args = classmethod(_with_args) with_callable_args = classmethod(_with_callable_args)", - "docstring": "Base observer Module. Any observer implementation should derive from this class. Concrete observers should follow the same API. In forward, they will update the statistics of the observed Tensor. And they should provide a function that computes the quantization parameters given the collected statistics. Args: dtype: dtype argument to the node needed to implement the reference model spec. is_dynamic: indicator for whether the observer is a placeholder for dynamic quantization or static quantization", - "type": "class", - "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", - "ast_data": "ClassDef name:ObserverBase FunctionDef name:__init__ arguments arg:self arg:dtype arg:is_dynamic type:bool Assign Assign FunctionDef name:forward arguments arg:self arg:x FunctionDef name:calculate_qparams arguments arg:self kwarg:kwargs Assign Call call:classmethod Assign Call call:classmethod" - }, - { - "library": "numpy", - "name": "ids", - "source_code": "def ids(self): if self._mask is nomask: return (self.ctypes.data, id(nomask)) return (self.ctypes.data, self._mask.ctypes.data)", - "docstring": "Return the addresses of the data and mask areas. Parameters ---------- None Examples -------- >>> import numpy as np >>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1]) >>> x.ids() (166670640, 166659832) # may vary If the array has no mask, the address of is returned. 
This address is typically not close to the data in memory: >>> x = np.ma.array([1, 2, 3]) >>> x.ids() (166691080, 3083169284) # may vary", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:ids arguments arg:self If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "sparse_fill_empty_rows", - "source_code": "@tf_export('sparse.fill_empty_rows', v1 = ['sparse.fill_empty_rows', 'sparse_fill_empty_rows']) @deprecation.deprecated_endpoints('sparse_fill_empty_rows') def sparse_fill_empty_rows(sp_input, default_value, name = None): sp_input = _convert_to_sparse_tensor(sp_input) with ops.name_scope(name, 'SparseFillEmptyRows', [sp_input]): default_value = ops.convert_to_tensor(default_value, dtype = sp_input.values.dtype) output_indices, output_values, empty_row_indicator, unused_reverse_index_map = gen_sparse_ops.sparse_fill_empty_rows(indices = sp_input.indices, values = sp_input.values, dense_shape = sp_input.dense_shape, default_value = default_value) return (sparse_tensor.SparseTensor(indices = output_indices, values = output_values, dense_shape = sp_input.dense_shape), empty_row_indicator)", - "docstring": "Fills empty rows in the input 2-D with a default value. This op adds entries with the specified at index for any row in the input that does not already have a value. For example, suppose has shape and non-empty values: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d Rows 1 and 4 are empty, so the output will be of shape with values: [0, 1]: a [0, 3]: b [1, 0]: default_value [2, 0]: c [3, 1]: d [4, 0]: default_value Note that the input may have empty columns at the end, with no effect on this op. The output will be in row-major order and will have the same shape as the input. This op also returns an indicator vector such that empty_row_indicator[i] = True iff row i was an empty row. Args: sp_input: A with shape . default_value: The value to fill for empty rows, with the same type as name: A name prefix for the returned tensors (optional) Returns: sp_ordered_output: A with shape , and with all empty rows filled in with . empty_row_indicator: A bool vector of length indicating whether each input row was empty. Raises: TypeError: If is not a .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", - "ast_data": "FunctionDef name:sparse_fill_empty_rows arguments arg:sp_input arg:default_value arg:name Call call:tf_export Call call:deprecated_endpoints Assign Call call:_convert_to_sparse_tensor With Assign Call call:convert_to_tensor Assign Call call:sparse_fill_empty_rows Return return:yes" - }, - { - "library": "kornia", - "name": "from_coeffs", - "source_code": "@classmethod def from_coeffs(cls, w: float, x: float, y: float, z: float) -> 'Quaternion': return cls(tensor([w, x, y, z]))", - "docstring": "Create a quaternion from the data coefficients. Args: w: a float representing the :math: component. x: a float representing the :math: component. y: a float representing the :math: component. z: a float representing the :math: component. Example: >>> q = Quaternion.from_coeffs(1., 0., 0., 0.) 
>>> q.data Parameter containing: tensor([1., 0., 0., 0.], requires_grad=True)", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\quaternion.py", - "ast_data": "FunctionDef name:from_coeffs arguments arg:cls arg:w type:float arg:x type:float arg:y type:float arg:z type:float Return return:yes" - }, - { - "library": "flexx", - "name": "TreeWithControls", - "source_code": "class TreeWithControls(flx.TreeWidget): @flx.emitter def key_down(self, e): ev = self._create_key_event(e) if ev.key.startswith('Arrow'): e.preventDefault() return ev @flx.reaction('key_down') def _handle_highlighting(self, *events): for ev in events: if ev.modifiers: continue if ev.key = = 'Escape': self.highlight_hide() elif ev.key = = ' ': if self.max_selected = = 0: self.highlight_toggle_checked() else: self.highlight_toggle_selected() elif ev.key = = 'Enter': self.highlight_toggle_checked() elif ev.key = = 'ArrowRight': item = self.highlight_get() if item and item.items: item.collapsed = None elif ev.key = = 'ArrowLeft': item = self.highlight_get() if item and item.items: item.collapsed = True elif ev.key = = 'ArrowDown': self.highlight_show(1) elif ev.key = = 'ArrowUp': self.highlight_show(-1)", - "docstring": "Adds a key press handler to allow controlling the TreeWidget with the arrow keys, space, and enter.", - "type": "class", - "file_path": "flexx\\flexxamples\\howtos\\control_with_keys.py", - "ast_data": "ClassDef name:TreeWithControls FunctionDef name:key_down arguments arg:self arg:e Assign Call call:_create_key_event If Call call:startswith Return return:yes FunctionDef name:_handle_highlighting arguments arg:self vararg:events Call call:reaction For If If Compare op:Eq If Compare op:Eq If Compare op:Eq If Compare op:Eq If Compare op:Eq Assign Call call:highlight_get If BoolOp Assign If Compare op:Eq Assign Call call:highlight_get If BoolOp Assign If Compare op:Eq If Compare op:Eq" - }, - { - "library": "mongo", - "name": "parse_ipv6_literal_host", - "source_code": "def parse_ipv6_literal_host(entity: str, default_port: Optional[int]) -> tuple[str, Optional[Union[str, int]]]: if entity.find(']') = = -1: raise ValueError(\"an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732.\") i = entity.find(']: ') if i = = -1: return (entity[1: -1], default_port) return (entity[1: i], entity[i + 2:])", - "docstring": "Validates an IPv6 literal host:port string. Returns a 2-tuple of IPv6 literal followed by port where port is default_port if it wasn't specified in entity. :param entity: A string that represents an IPv6 literal enclosed in braces (e.g. '[::1]' or '[::1]:27017'). 
:param default_port: The port number to use when one wasn't specified in entity.", - "type": "function", - "file_path": "mongo\\pymongo\\uri_parser_shared.py", - "ast_data": "FunctionDef name:parse_ipv6_literal_host arguments arg:entity type:str arg:default_port type:Optional[int] If Compare op:Eq Raise raises:ValueError(\"an IPv6 address literal must be enclosed in '[' and ']' according to RFC 2732.\") Assign Call call:find If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "step", - "source_code": "@torch.no_grad() def step(self, closure = None): self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad: list[Tensor] = [] grads: list[Tensor] = [] row_vars: list[Optional[Tensor]] = [] col_vars: list[Optional[Tensor]] = [] variances: list[Optional[Tensor]] = [] state_steps: list[Tensor] = [] eps1, eps2 = group['eps'] has_complex = self._init_group(group, params_with_grad, grads, row_vars, col_vars, variances, state_steps) adafactor(params_with_grad, grads, row_vars, col_vars, variances, state_steps, d = group['d'], lr = group['lr'], beta2_decay = group['beta2_decay'], weight_decay = group['weight_decay'], eps1 = eps1, eps2 = eps2, foreach = group['foreach'], maximize = group['maximize'], grad_scale = getattr(self, 'grad_scale', None), found_inf = getattr(self, 'found_inf', None), has_complex = has_complex) return loss", - "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.", - "type": "method", - "file_path": "pytorch\\torch\\optim\\_adafactor.py", - "ast_data": "FunctionDef name:step arguments arg:self arg:closure Call call:no_grad Assign If Compare op:IsNot With Assign Call call:closure For Assign Assign Call call:_init_group Return return:yes" - }, - { - "library": "scipy", - "name": "lagrange_inversion", - "source_code": "def lagrange_inversion(a): n = len(a) f = sum((a[i] * x ** i for i in range(n))) h = (x / f).series(x, 0, n).removeO() hpower = [h ** 0] for k in range(n): hpower.append((hpower[-1] * h).expand()) b = [mp.mpf(0)] for k in range(1, n): b.append(hpower[k].coeff(x, k - 1) / k) b = [mp.mpf(x) for x in b] return b", - "docstring": "Given a series f(x) = a[1]*x + a[2]*x**2 + ... + a[n-1]*x**(n - 1), use the Lagrange inversion formula to compute a series g(x) = b[1]*x + b[2]*x**2 + ... + b[n-1]*x**(n - 1) so that f(g(x)) = g(f(x)) = x mod x**n. We must have a[0] = 0, so necessarily b[0] = 0 too. 
The algorithm is naive and could be improved, but speed isn't an issue here and it's easy to read.", - "type": "function", - "file_path": "scipy\\scipy\\special\\_precompute\\utils.py", - "ast_data": "FunctionDef name:lagrange_inversion arguments arg:a Assign Call call:len Assign Call call:sum Assign Call call:removeO Assign For Call call:range Assign For Call call:range Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "is_structseq_instance", - "source_code": "def is_structseq_instance(obj: object) -> bool: return is_structseq_class(type(obj))", - "docstring": "Return whether the object is an instance of PyStructSequence.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\_pytree.py", - "ast_data": "FunctionDef name:is_structseq_instance arguments arg:obj type:object Return return:yes" - }, - { - "library": "pandas", - "name": "infer_axes", - "source_code": "def infer_axes(self) -> bool: s = self.storable if s is None: return False self.get_attrs() return True", - "docstring": "infer the axes of my storer return a boolean indicating if we have a valid storer or not", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:infer_axes arguments arg:self Assign If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "allreduce_hook", - "source_code": "def allreduce_hook(process_group: dist.ProcessGroup, bucket: dist.GradBucket) -> torch.futures.Future[torch.Tensor]: return _allreduce_fut(process_group, bucket.buffer())", - "docstring": "Call `` callback takes the mean and returns the result. If user registers this DDP communication hook, DDP results is expected to be same as the case where no hook was registered. Hence, this won't change behavior of DDP and user can use this as a reference or modify this hook to log useful information or any other purposes while unaffecting DDP behavior. Example:: >>> # xdoctest: +SKIP >>> ddp_model.register_comm_hook(process_group, allreduce_hook)", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\default_hooks.py", - "ast_data": "FunctionDef name:allreduce_hook arguments arg:process_group type:dist.ProcessGroup arg:bucket type:dist.GradBucket Return return:yes" - }, - { - "library": "pytorch", - "name": "BuildType", - "source_code": "class BuildType: def __init__(self, cmake_build_type_env: str | None = None) -> None: if cmake_build_type_env is not None: self.build_type_string = cmake_build_type_env return cmake_cache_txt = os.path.join(BUILD_DIR, 'CMakeCache.txt') if os.path.isfile(cmake_cache_txt): from .cmake_utils import get_cmake_cache_variables_from_file with open(cmake_cache_txt) as f: cmake_cache_vars = get_cmake_cache_variables_from_file(f) self.build_type_string = cast(str, cmake_cache_vars['CMAKE_BUILD_TYPE']) else: self.build_type_string = os.environ.get('CMAKE_BUILD_TYPE', 'Release') def is_debug(self) -> bool: return self.build_type_string = = 'Debug' def is_rel_with_deb_info(self) -> bool: return self.build_type_string = = 'RelWithDebInfo' def is_release(self) -> bool: return self.build_type_string = = 'Release'", - "docstring": "Checks build type. The build type will be given in :attr:. If :attr: is `` does not exist, os.environ['CMAKE_BUILD_TYPE'] will be used. Args: cmake_build_type_env (str): The value of os.environ['CMAKE_BUILD_TYPE']. 
If None, the actual build type will be inferred.", - "type": "class", - "file_path": "pytorch\\tools\\setup_helpers\\env.py", - "ast_data": "ClassDef name:BuildType FunctionDef name:__init__ arguments arg:self arg:cmake_build_type_env type:str | None If Compare op:IsNot Assign Return return:no Assign Call call:join If Call call:isfile With Assign Call call:get_cmake_cache_variables_from_file Assign Call call:cast Assign Call call:get FunctionDef name:is_debug arguments arg:self Return return:yes FunctionDef name:is_rel_with_deb_info arguments arg:self Return return:yes FunctionDef name:is_release arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "enable_observer", - "source_code": "def enable_observer(mod): if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): mod.enable_observer()", - "docstring": "Enable observation for this module. Enable observation for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.enable_observer)", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py", - "ast_data": "FunctionDef name:enable_observer arguments arg:mod If BoolOp Call call:isinstance Call call:_is_fake_quant_script_module" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, profile_datum_list, time_unit = cli_shared.TIME_UNIT_US): self._profile_datum_list = profile_datum_list self.formatted_start_time = [datum.start_time for datum in profile_datum_list] self.formatted_op_time = [cli_shared.time_to_readable_str(datum.op_time, force_time_unit = time_unit) for datum in profile_datum_list] self.formatted_exec_time = [cli_shared.time_to_readable_str(datum.node_exec_stats.all_end_rel_micros, force_time_unit = time_unit) for datum in profile_datum_list] self._column_names = ['Node', 'Op Type', 'Start Time (us)', 'Op Time (%s)' % time_unit, 'Exec Time (%s)' % time_unit, 'Filename: Lineno(function)'] self._column_sort_ids = [SORT_OPS_BY_OP_NAME, SORT_OPS_BY_OP_TYPE, SORT_OPS_BY_START_TIME, SORT_OPS_BY_OP_TIME, SORT_OPS_BY_EXEC_TIME, SORT_OPS_BY_LINE]", - "docstring": "Constructor. Args: profile_datum_list: List of objects. 
time_unit: must be in cli_shared.TIME_UNITS.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\profile_analyzer_cli.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:profile_datum_list arg:time_unit Assign Assign Assign Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "transfer_parametrizations_and_params", - "source_code": "def transfer_parametrizations_and_params(from_module: Module, to_module: Module, tensor_name: Optional[str] = None) -> Module: if is_parametrized(from_module): assert isinstance(from_module.parametrizations, ModuleDict) parameters_to_transfer: Union[list, ModuleDict] = from_module.parametrizations if tensor_name is None else [tensor_name] assert hasattr(parameters_to_transfer, '__iter__') for parameter_name in parameters_to_transfer: if not hasattr(to_module, parameter_name): setattr(to_module, parameter_name, Parameter(getattr(from_module, parameter_name))) for param_func in from_module.parametrizations[parameter_name]: register_parametrization(to_module, parameter_name, param_func) assert isinstance(to_module.parametrizations, ModuleDict) if hasattr(from_module.parametrizations[parameter_name], 'original'): to_module.parametrizations[parameter_name].original = from_module.parametrizations[parameter_name].original else: num = 0 orig_num = 'original' + str(num) while hasattr(from_module.parametrizations[parameter_name], orig_num): setattr(to_module.parametrizations[parameter_name], orig_num, getattr(from_module.parametrizations[parameter_name], orig_num)) num = num + 1 orig_num = 'original' + str(num) return to_module", - "docstring": "Transfer parametrizations and the parameters they parametrize from :attr: to :attr:. If :attr: is specified, only transfers the specified parameter, otherwise transfers all parametrized parameters. If those parameters do not exist in to_module, it will create them. Does nothing if from_module is not parametrized. Args: from_module (nn.Module): module to transfer from to_module (nn.Module): module to transfer to tensor_name (str, optional): parameter to transfer Returns: Module: to_module", - "type": "function", - "file_path": "pytorch\\torch\\nn\\utils\\parametrize.py", - "ast_data": "FunctionDef name:transfer_parametrizations_and_params arguments arg:from_module type:Module arg:to_module type:Module arg:tensor_name type:Optional[str] If Call call:is_parametrized For If For If Call call:hasattr Assign Assign Assign While Call call:hasattr Assign Assign Return return:yes" - }, - { - "library": "pandas", - "name": "total_seconds", - "source_code": "def total_seconds(self) -> npt.NDArray[np.float64]: pps = periods_per_second(self._creso) return self._maybe_mask_results(self.asi8 / pps, fill_value = None)", - "docstring": "Return total duration of each element expressed in seconds. This method is available directly on TimedeltaArray, TimedeltaIndex and on Series containing timedelta values under the `float64` whose index is the same as the original. See Also -------- datetime.timedelta.total_seconds : Standard library version of this method. TimedeltaIndex.components : Return a DataFrame with components of each Timedelta. 
Examples -------- **Series** >>> s = pd.Series(pd.to_timedelta(np.arange(5), unit=\"D\")) >>> s 0 0 days 1 1 days 2 2 days 3 3 days 4 4 days dtype: timedelta64[ns] >>> s.dt.total_seconds() 0 0.0 1 86400.0 2 172800.0 3 259200.0 4 345600.0 dtype: float64 **TimedeltaIndex** >>> idx = pd.to_timedelta(np.arange(5), unit=\"D\") >>> idx TimedeltaIndex(['0 days', '1 days', '2 days', '3 days', '4 days'], dtype='timedelta64[ns]', freq=None) >>> idx.total_seconds() Index([0.0, 86400.0, 172800.0, 259200.0, 345600.0], dtype='float64')", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\timedeltas.py", - "ast_data": "FunctionDef name:total_seconds arguments arg:self Assign Call call:periods_per_second Return return:yes" - }, - { - "library": "scipy", - "name": "RotatedEllipse02", - "source_code": "class RotatedEllipse02(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-500.0] * self.N, [500.0] * self.N)) self.custom_bounds = ([-2.0, 2.0], [-2.0, 2.0]) self.global_optimum = [[0.0, 0.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return x[0] ** 2.0 - x[0] * x[1] + x[1] ** 2.0", - "docstring": "Rotated Ellipse 2 objective function. This class defines the Rotated Ellipse 2 [1]_ global optimization problem. This is a unimodal minimization problem defined as follows: .. math:: f_{\\text{RotatedEllipse02}}(x) = x_1^2 - x_1 x_2 + x_2^2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py", - "ast_data": "ClassDef name:RotatedEllipse02 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "tensorflow", - "name": "replace_flat_tensors_for_gradients", - "source_code": "def replace_flat_tensors_for_gradients(xs, flat_grads): xs_structure = [_get_tensors_for_gradient(x) for x in xs] grads = nest.pack_sequence_as(xs_structure, flat_grads) return [_replace_tensors_for_gradient(x, grad) for x, grad in zip(xs, grads)]", - "docstring": "Replaces Tensors that should be differentiated in with . Args: xs: A list of s or s. flat_grads: A list of . 
Returns: A list of or .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\composite_tensor_gradient.py", - "ast_data": "FunctionDef name:replace_flat_tensors_for_gradients arguments arg:xs arg:flat_grads Assign Assign Call call:pack_sequence_as Return return:yes" - }, - { - "library": "pytorch", - "name": "dtype", - "source_code": "def dtype(self, node: IRNode) -> Optional[str]: if node is None: return 'void' return DTYPE_TO_CPP.get(node.get_layout().dtype)", - "docstring": "Generates code which represents dtype of a given node.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", - "ast_data": "FunctionDef name:dtype arguments arg:self arg:node type:IRNode If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "random_flip_left_right", - "source_code": "@tf_export('image.random_flip_left_right') @dispatch.add_dispatch_support def random_flip_left_right(image, seed = None): random_func = functools.partial(random_ops.random_uniform, seed = seed) return _random_flip(image, 1, random_func, 'random_flip_left_right')", - "docstring": "Randomly flip an image horizontally (left to right). With a 1 in 2 chance, outputs the contents of flipped along the second dimension, which is . Otherwise output the image as-is. When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_left_right(image, 5).numpy().tolist() [[[2], [1]], [[4], [3]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_left_right(images, 6).numpy().tolist() [[[[2], [1]], [[4], [3]]], [[[5], [6]], [[7], [8]]]] For producing deterministic results given a value, use . Unlike using the param with ops, ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 4-D Tensor of shape or 3-D Tensor of shape . seed: A Python integer. Used to create a random seed. See for behavior. Returns: A tensor of the same type and shape as . Raises: ValueError: if the shape of not supported.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", - "ast_data": "FunctionDef name:random_flip_left_right arguments arg:image arg:seed Call call:tf_export Assign Call call:partial Return return:yes" - }, - { - "library": "django", - "name": "exceeds_maximum_length_ratio", - "source_code": "def exceeds_maximum_length_ratio(password, max_similarity, value): pwd_len = len(password) length_bound_similarity = max_similarity / 2 * pwd_len value_len = len(value) return pwd_len > = 10 * value_len and value_len < length_bound_similarity", - "docstring": "Test that value is within a reasonable range of password. The following ratio calculations are based on testing SequenceMatcher like this: for i in range(0,6): print(10**i, SequenceMatcher(a='A', b='A'*(10**i)).quick_ratio()) which yields: 1 1.0 10 0.18181818181818182 100 0.019801980198019802 1000 0.001998001998001998 10000 0.00019998000199980003 100000 1.999980000199998e-05 This means a length_ratio of 10 should never yield a similarity higher than 0.2, for 100 this is down to 0.02 and for 1000 it is 0.002. This can be calculated via 2 / length_ratio. 
As a result we avoid the potentially expensive sequence matching.", - "type": "function", - "file_path": "django\\django\\contrib\\auth\\password_validation.py", - "ast_data": "FunctionDef name:exceeds_maximum_length_ratio arguments arg:password arg:max_similarity arg:value Assign Call call:len Assign Assign Call call:len Return return:yes" - }, - { - "library": "pytorch", - "name": "mem_get_info", - "source_code": "def mem_get_info(device: 'Device' = None) -> tuple[int, int]: if device is None: device = torch.cuda.current_device() device = _get_device_index(device, optional = True) return torch.cuda.cudart().cudaMemGetInfo(device)", - "docstring": "Return the global free and total GPU memory for a given device using cudaMemGetInfo. Args: device (torch.device or int or str, optional): selected device. Returns statistic for the current device, given by :func:, if :attr: is `cuda-memory-management` for more details about GPU memory management.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\memory.py", - "ast_data": "FunctionDef name:mem_get_info arguments arg:device type:'Device' If Compare op:Is Assign Call call:current_device Assign Call call:_get_device_index Return return:yes" - }, - { - "library": "pytorch", - "name": "assoc", - "source_code": "def assoc(d, key, value, factory = dict): d2 = factory() d2.update(d) d2[key] = value return d2", - "docstring": "Return a new dict with new key value pair New dict has d[key] set to value. Does not modify the initial dictionary. >>> assoc({\"x\": 1}, \"x\", 2) {'x': 2} >>> assoc({\"x\": 1}, \"y\", 3) # doctest: +SKIP {'x': 1, 'y': 3}", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py", - "ast_data": "FunctionDef name:assoc arguments arg:d arg:key arg:value arg:factory Assign Call call:factory Assign Return return:yes" - }, - { - "library": "django", - "name": "tuple", - "source_code": "@property def tuple(self): return tuple((self[i].tuple for i in range(self.geom_count)))", - "docstring": "Return a tuple representation of this Geometry Collection.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", - "ast_data": "FunctionDef name:tuple arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "reduce", - "source_code": "def reduce(self, func: Callable) -> Self: assert self.ndim = = 2 res_blocks: list[Block] = [] for blk in self.blocks: nbs = blk.reduce(func) res_blocks.extend(nbs) index = Index([None]) new_mgr = type(self).from_blocks(res_blocks, [self.items, index]) return new_mgr", - "docstring": "Apply reduction function blockwise, returning a single-row BlockManager. Parameters ---------- func : reduction function Returns ------- BlockManager", - "type": "method", - "file_path": "pandas\\pandas\\core\\internals\\managers.py", - "ast_data": "FunctionDef name:reduce arguments arg:self arg:func type:Callable For Assign Call call:reduce Assign Call call:Index Assign Call call:from_blocks Return return:yes" - }, - { - "library": "tensorflow", - "name": "tflite_to_tosa_bytecode", - "source_code": "@tf_export('mlir.experimental.tflite_to_tosa_bytecode') def tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant = False, ordered_input_arrays = None, ordered_output_arrays = None): pywrap_mlir.experimental_tflite_to_tosa_bytecode(flatbuffer, bytecode, use_external_constant, ordered_input_arrays, ordered_output_arrays)", - "docstring": "Converts TFLite flatbuffer to TOSA dialect in MLIR bytecode. 
Args: flatbuffer: Path to flatbuffer. bytecode: Path to output bytecode. use_external_constant: Whether to create instead of . ordered_input_arrays: ordered_output_arrays: If ordered_output_arrays is not empty, then the function will only return nodes in ordered_output_arrays in the same order", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\compiler\\mlir\\mlir.py", - "ast_data": "FunctionDef name:tflite_to_tosa_bytecode arguments arg:flatbuffer arg:bytecode arg:use_external_constant arg:ordered_input_arrays arg:ordered_output_arrays Call call:tf_export" - }, - { - "library": "pytorch", - "name": "set_module", - "source_code": "def set_module(obj, mod): if not isinstance(mod, str): raise TypeError('The mod argument should be a string') obj.__module__ = mod", - "docstring": "Set the module attribute on a python object for a given object for nicer printing", - "type": "function", - "file_path": "pytorch\\torch\\utils\\__init__.py", - "ast_data": "FunctionDef name:set_module arguments arg:obj arg:mod If Raise raises:TypeError('The mod argument should be a string') Assign" - }, - { - "library": "django", - "name": "set_extra_mask", - "source_code": "def set_extra_mask(self, names): if names is None: self.extra_select_mask = None else: self.extra_select_mask = set(names) self._extra_select_cache = None", - "docstring": "Set the mask of extra select items that will be returned by SELECT. Don't remove them from the Query since they might be used later.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\query.py", - "ast_data": "FunctionDef name:set_extra_mask arguments arg:self arg:names If Compare op:Is Assign Assign Call call:set Assign" - }, - { - "library": "cherrypy", - "name": "wait", - "source_code": "def wait(self, state, interval = 0.1, channel = None): if isinstance(state, (tuple, list)): if self.state not in state: events = tuple([self._get_state_event(s) for s in state]) win32event.WaitForMultipleObjects(events, 0, win32event.INFINITE) elif self.state ! = state: event = self._get_state_event(state) win32event.WaitForSingleObject(event, win32event.INFINITE)", - "docstring": "Wait for the given state(s), KeyboardInterrupt or SystemExit. Since this class uses native win32event objects, the interval argument is ignored.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\process\\win32.py", - "ast_data": "FunctionDef name:wait arguments arg:self arg:state arg:interval arg:channel If Call call:isinstance If Compare op:NotIn Assign Call call:tuple If Compare op:NotEq Assign Call call:_get_state_event" - }, - { - "library": "pytorch", - "name": "compare_ops", - "source_code": "def compare_ops(program_a: torch.export.ExportedProgram, program_b: torch.export.ExportedProgram) -> tuple[set[str], set[str]]: program_a_ops = set(_count_fx_targets(program_a)) program_b_ops = set(_count_fx_targets(program_b)) return (program_a_ops - program_b_ops, program_b_ops - program_a_ops)", - "docstring": "Compare and get unique ops in two exported programs. Args: program_a: The first exported program. program_b: The second exported program. 
Returns: A tuple of two sets, where the first set contains the unique ops in the first program and the second set contains the unique ops in the second program.", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_analysis.py", - "ast_data": "FunctionDef name:compare_ops arguments arg:program_a type:torch.export.ExportedProgram arg:program_b type:torch.export.ExportedProgram Assign Call call:set Assign Call call:set Return return:yes" - }, - { - "library": "flexx", - "name": "init", - "source_code": "def init(self): pass", - "docstring": "Initializer method. This method can be overloaded when creating a custom class. It is called with this component as a context manager (i.e. it is the active component), and it receives any positional arguments that were passed to the constructor.", - "type": "method", - "file_path": "flexx\\flexx\\event\\_component.py", - "ast_data": "FunctionDef name:init arguments arg:self" - }, - { - "library": "pandas", - "name": "is_monotonic_decreasing", - "source_code": "@property def is_monotonic_decreasing(self) -> bool: return self._engine.is_monotonic_decreasing", - "docstring": "Return a boolean if the values are equal or decreasing. Returns ------- bool See Also -------- Index.is_monotonic_increasing : Check if the values are equal or increasing. Examples -------- >>> pd.Index([3, 2, 1]).is_monotonic_decreasing True >>> pd.Index([3, 2, 2]).is_monotonic_decreasing True >>> pd.Index([3, 1, 2]).is_monotonic_decreasing False", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\base.py", - "ast_data": "FunctionDef name:is_monotonic_decreasing arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "optional", - "source_code": "@property def optional(self) -> bool: return self.default is not self.empty", - "docstring": "If this parameter might not be supplied for a call.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", - "ast_data": "FunctionDef name:optional arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "Code", - "source_code": "class Code(SphinxDirective): optional_arguments = 1 option_spec: ClassVar[OptionSpec] = {'class': directives.class_option, 'force': directives.flag, 'name': directives.unchanged, 'number-lines': optional_int} has_content = True def run(self) -> list[Node]: self.assert_has_content() set_classes(self.options) code = '\\n'.join(self.content) node = nodes.literal_block(code, code, classes = self.options.get('classes', []), force = 'force' in self.options, highlight_args = {}) self.add_name(node) set_source_info(self, node) if self.arguments: node['language'] = self.arguments[0] else: node['language'] = self.env.current_document.highlight_language or self.config.highlight_language if 'number-lines' in self.options: node['linenos'] = True if self.options['number-lines']: node['highlight_args']['linenostart'] = self.options['number-lines'] return [node]", - "docstring": "Parse and mark up content of a code block. 
This is compatible with docutils' :rst:dir: directive.", - "type": "class", - "file_path": "sphinx\\sphinx\\directives\\patches.py", - "ast_data": "ClassDef name:Code Assign Assign FunctionDef name:run arguments arg:self Assign Call call:join Assign Call call:literal_block If Assign Assign BoolOp If Compare op:In Assign If Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, verts, sizes = None, *, closed = True, **kwargs): super().__init__(**kwargs) self.set_sizes(sizes) self.set_verts(verts, closed) self.stale = True", - "docstring": "Parameters ---------- verts : list of array-like The sequence of polygons [*verts0*, *verts1*, ...] where each element *verts_i* defines the vertices of polygon *i* as a 2D array-like of shape (M, 2). sizes : array-like, default: None Squared scaling factors for the polygons. The coordinates of each polygon *verts_i* are multiplied by the square-root of the corresponding entry in *sizes* (i.e., *sizes* specify the scaling of areas). The scaling is applied before the Artist master transform. closed : bool, default: True Whether the polygon should be closed by adding a CLOSEPOLY connection at the end. **kwargs Forwarded to .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:verts arg:sizes kwarg:kwargs Assign" - }, - { - "library": "matplotlib", - "name": "set_hatch", - "source_code": "def set_hatch(self, hatch): mhatch._validate_hatch_pattern(hatch) self._hatch = hatch self.stale = True", - "docstring": "Set the hatching pattern. *hatch* can be one of:: / - diagonal hatching \\ - back diagonal | - vertical - - horizontal + - crossed x - crossed diagonal o - small circle O - large circle . - dots * - stars Letters can be combined, in which case all the specified hatchings are done. If same letter repeats, it increases the density of hatching of that pattern. Parameters ---------- hatch : {'/', '\\\\', '|', '-', '+', 'x', 'o', 'O', '.', '*'}", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_hatch arguments arg:self arg:hatch Assign Assign" - }, - { - "library": "pygame", - "name": "print_", - "source_code": "def print_(*args, **kwds): stream = kwds.get('file', sys.stdout) sep = kwds.get('sep', ' ') end = kwds.get('end', '\\n') if args: stream.write(sep.join([str(arg) for arg in args])) if end: stream.write(end) try: stream.flush() except AttributeError: pass", - "docstring": "Print arguments in an MSYS console friendly way Keyword arguments: file, sep, end", - "type": "function", - "file_path": "pygame\\buildconfig\\msysio.py", - "ast_data": "FunctionDef name:print_ arguments vararg:args kwarg:kwds Assign Call call:get Assign Call call:get Assign Call call:get If If Try ExceptHandler" - }, - { - "library": "matplotlib", - "name": "set_data_interval", - "source_code": "def set_data_interval(self, vmin, vmax, ignore = False): raise NotImplementedError('Derived must override')", - "docstring": "Set the axis data limits. This method is for internal use. If *ignore* is False (the default), this method will never reduce the preexisting data limits, only expand them if *vmin* or *vmax* are not within them. Moreover, the order of *vmin* and *vmax* does not matter; the orientation of the axis will not change. 
If *ignore* is True, the data limits will be set exactly to `` in that order.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axis.py", - "ast_data": "FunctionDef name:set_data_interval arguments arg:self arg:vmin arg:vmax arg:ignore Raise raises:NotImplementedError('Derived must override')" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, seq_module): super().__init__() self.seq_module = seq_module", - "docstring": "Adds padding to the output of the module based on the given lengths. This is to ensure that the results of the model do not change when batch sizes change during inference. Input needs to be in the shape of (BxCxDxT) :param seq_module: The sequential module containing the conv stack.", - "type": "method", - "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchaudio_models.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:seq_module Assign" - }, - { - "library": "scikit-learn", - "name": "dbscan_clustering", - "source_code": "def dbscan_clustering(self, cut_distance, min_cluster_size = 5): labels = labelling_at_cut(self._single_linkage_tree_, cut_distance, min_cluster_size) infinite_index = self.labels_ == _OUTLIER_ENCODING['infinite']['label'] missing_index = self.labels_ == _OUTLIER_ENCODING['missing']['label'] labels[infinite_index] = _OUTLIER_ENCODING['infinite']['label'] labels[missing_index] = _OUTLIER_ENCODING['missing']['label'] return labels", - "docstring": "Return clustering given by DBSCAN without border points. Return clustering that would be equivalent to running DBSCAN* for a particular cut_distance (or epsilon) DBSCAN* can be thought of as DBSCAN without the border points. As such these results may differ slightly from due to the difference in implementation over the non-core points. This can also be thought of as a flat clustering derived from constant height cut through the single linkage tree. This represents the result of selecting a cut value for robust single linkage clustering. The allows the flat clustering to declare noise points (and cluster smaller than ). Parameters ---------- cut_distance : float The mutual reachability distance cut value to use to generate a flat clustering. min_cluster_size : int, default=5 Clusters smaller than this value with be called 'noise' and remain unclustered in the resulting flat clustering. Returns ------- labels : ndarray of shape (n_samples,) An array of cluster labels, one per datapoint. Outliers are labeled as follows: - Noisy samples are given the label -1. - Samples with infinite elements (+/- np.inf) are given the label -2. 
- Samples with missing data are given the label -3, even if they also have infinite elements.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py", - "ast_data": "FunctionDef name:dbscan_clustering arguments arg:self arg:cut_distance arg:min_cluster_size Assign Call call:labelling_at_cut Assign Compare op:Eq Assign Compare op:Eq Assign Assign Return return:yes" - }, - { - "library": "mongo", - "name": "reset_server", - "source_code": "def reset_server(self, address: _Address) -> TopologyDescription: unknown_sd = self._server_descriptions[address].to_unknown() return updated_topology_description(self, unknown_sd)", - "docstring": "A copy of this description, with one server marked Unknown.", - "type": "method", - "file_path": "mongo\\pymongo\\topology_description.py", - "ast_data": "FunctionDef name:reset_server arguments arg:self arg:address type:_Address Assign Call call:to_unknown Return return:yes" - }, - { - "library": "cherrypy", - "name": "check_site_config_entries_in_app_config", - "source_code": "def check_site_config_entries_in_app_config(self): for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue msg = [] for section, entries in app.config.items(): if section.startswith('/'): for key, value in entries.items(): for n in ('engine.', 'server.', 'tree.', 'checker.'): if key.startswith(n): msg.append('[%s] %s = %s' % (section, key, value)) if msg: msg.insert(0, 'The application mounted at %r contains the following config entries, which are only allowed in site-wide config. Move them to a [global] section and pass them to cherrypy.config.update() instead of tree.mount().' % sn) warnings.warn(os.linesep.join(msg))", - "docstring": "Check for mounted Applications that have site-scoped config.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpchecker.py", - "ast_data": "FunctionDef name:check_site_config_entries_in_app_config arguments arg:self For Call call:items If Assign For Call call:items If Call call:startswith For Call call:items For If Call call:startswith If" - }, - { - "library": "tensorflow", - "name": "compiler_ir_generator", - "source_code": "def compiler_ir_generator(stage = 'hlo', device_name = None, platform_name = None): if device_name is not None: if platform_name is not None: raise ValueError('device_name and platform_name cannot be provided at the same time.') warnings.warn('device_name is being deprecated. Use platform_name.') device_name = compiler_ir.maybe_get_device_name(device_name) res_bytes = context.context().get_compiler_ir(device_name = device_name, platform_name = platform_name, function_name = fn_name, flat_args = list(filtered_flat_args), captured_inputs = concrete_fn.captured_inputs, stage = stage) if stage in ('stablehlo_serialized', 'hlo_serialized', 'optimized_hlo_serialized', 'optimized_hlo_proto_serialized'): return res_bytes else: return res_bytes.decode('utf-8')", - "docstring": "Gets the compiler IR bytes. Args: stage: The exported stage for the given function. device_name: The name of the device with the form as \"/job:localhost/replica:0/task:0/device:CPU:0\", \"/device:TPU:0\" etc. When this is used, actual device is used for getting the compiler IR. platform_name: The name of the platform, e.g. \"TPU\". See the comment in in . 
Returns: The compiler IR bytes.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\polymorphic_function.py", - "ast_data": "FunctionDef name:compiler_ir_generator arguments arg:stage arg:device_name arg:platform_name If Compare op:IsNot If Compare op:IsNot Raise raises:ValueError('device_name and platform_name cannot be provided at the same time.') Assign Call call:maybe_get_device_name Assign Call call:get_compiler_ir If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "sphinx", - "name": "signature_from_ast", - "source_code": "def signature_from_ast(node: ast.FunctionDef, code: str = '') -> Signature: EMPTY = Parameter.empty args: ast.arguments = node.args defaults: tuple[ast.expr | None, ...] = tuple(args.defaults) pos_only_offset = len(args.posonlyargs) defaults_offset = pos_only_offset + len(args.args) - len(defaults) defaults = (None,) * defaults_offset + defaults params: list[Parameter] = [] for arg, defexpr in zip(args.posonlyargs, defaults, strict = False): params.append(_define(Parameter.POSITIONAL_ONLY, arg, code, defexpr = defexpr)) for arg, defexpr in zip(args.args, defaults[pos_only_offset:], strict = False): params.append(_define(Parameter.POSITIONAL_OR_KEYWORD, arg, code, defexpr = defexpr)) if args.vararg: params.append(_define(Parameter.VAR_POSITIONAL, args.vararg, code, defexpr = None)) for arg, defexpr in zip(args.kwonlyargs, args.kw_defaults, strict = False): params.append(_define(Parameter.KEYWORD_ONLY, arg, code, defexpr = defexpr)) if args.kwarg: params.append(_define(Parameter.VAR_KEYWORD, args.kwarg, code, defexpr = None)) return_annotation = ast_unparse(node.returns, code) or EMPTY return Signature(params, return_annotation = return_annotation)", - "docstring": "Create a :class: object from an AST node.", - "type": "function", - "file_path": "sphinx\\sphinx\\util\\inspect.py", - "ast_data": "FunctionDef name:signature_from_ast arguments arg:node type:ast.FunctionDef arg:code type:str Assign Assign Call call:len Assign Assign For Call call:zip For Call call:zip If For Call call:zip If Assign BoolOp Call call:ast_unparse Return return:yes" - }, - { - "library": "pytorch", - "name": "load_library", - "source_code": "def load_library(self, path): torch.ops.load_library(path)", - "docstring": "Loads a shared library from the given path into the current process. The library being loaded may run global initialization code to register custom classes with the PyTorch JIT runtime. This allows dynamically loading custom classes. For this, you should compile your class and the static registration code into a shared library object, and then call `` attribute, a set that may be inspected for the paths of all libraries loaded using this function. 
Args: path (str): A path to a shared library to load.", - "type": "method", - "file_path": "pytorch\\torch\\_classes.py", - "ast_data": "FunctionDef name:load_library arguments arg:self arg:path" - }, - { - "library": "pytorch", - "name": "OutputSharding", - "source_code": "@dataclass class OutputSharding: output_spec: OutputSpecType redistribute_schema: Optional[OpSchema] = None needs_redistribute: bool = False @cached_property def mesh(self): if isinstance(self.output_spec, DTensorSpec): return self.output_spec.mesh elif isinstance(self.output_spec, tuple): out_spec = self.output_spec[0] if isinstance(out_spec, DTensorSpec): return out_spec.mesh else: raise ValueError(f'Unknown output spec type: {type(out_spec)}') else: raise ValueError(f'Unknown output spec type: {type(self.output_spec)}')", - "docstring": "OutputSharding is a data class that is used by the sharding propagation, it could set the output_spec upon successful propagation. If needs_redistribute is set to True, a redistribute_schema would be returned together to indicate the input arguments needs to be redistributed before the op execution. NOTE: the redistribute_schema generated by sharding propagation should be exactly the same as the operator OpSchema, except the DTensorSpecs", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py", - "ast_data": "ClassDef name:OutputSharding FunctionDef name:mesh arguments arg:self If Call call:isinstance Return return:yes If Call call:isinstance Assign If Call call:isinstance Return return:yes Raise raises:ValueError(f'Unknown output spec type: {type(out_spec)}') Raise raises:ValueError(f'Unknown output spec type: {type(self.output_spec)}')" - }, - { - "library": "pandas", - "name": "to_feather", - "source_code": "@doc(storage_options = _shared_docs['storage_options']) def to_feather(df: DataFrame, path: FilePath | WriteBuffer[bytes], storage_options: StorageOptions | None = None, **kwargs: Any) -> None: import_optional_dependency('pyarrow') from pyarrow import feather if not isinstance(df, DataFrame): raise ValueError('feather only support IO with DataFrames') with get_handle(path, 'wb', storage_options = storage_options, is_text = False) as handles: feather.write_feather(df, handles.handle, **kwargs)", - "docstring": "Write a DataFrame to the binary Feather format. 
Parameters ---------- df : DataFrame path : str, path object, or file-like object {storage_options} **kwargs : Additional keywords passed to .", - "type": "function", - "file_path": "pandas\\pandas\\io\\feather_format.py", - "ast_data": "FunctionDef name:to_feather arguments arg:df type:DataFrame arg:path type:FilePath | WriteBuffer[bytes] arg:storage_options type:StorageOptions | None kwarg:kwargs Call call:doc If Raise raises:ValueError('feather only support IO with DataFrames') With" - }, - { - "library": "tensorflow", - "name": "is_generic_union", - "source_code": "def is_generic_union(tp): return tp is not typing.Union and getattr(tp, '__origin__', None) is typing.Union", - "docstring": "Returns true if is a parameterized typing.Union value.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\type_annotations.py", - "ast_data": "FunctionDef name:is_generic_union arguments arg:tp Return return:yes" - }, - { - "library": "coconut", - "name": "newer", - "source_code": "def newer(new_ver, old_ver, strict = False): if old_ver == new_ver or old_ver + (0,) == new_ver: return False for n, o in zip(new_ver, old_ver): if not isinstance(n, int): o = str(o) if o < n: return True elif o > n: return False return not strict", - "docstring": "Determines if the first version tuple is newer than the second. True if newer; False if older.", - "type": "function", - "file_path": "coconut\\coconut\\requirements.py", - "ast_data": "FunctionDef name:newer arguments arg:new_ver arg:old_ver arg:strict If BoolOp Compare op:Eq Compare op:Eq Return return:yes For Call call:zip If Assign Call call:str If Compare op:Lt Return return:yes If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, learning_rate = 0.001, rho = 0.95, epsilon = 1e-08, use_locking = False, name = 'Adadelta'): super(AdadeltaOptimizer, self).__init__(use_locking, name) self._lr = learning_rate self._rho = rho self._epsilon = epsilon self._lr_t = None self._rho_t = None self._epsilon_t = None", - "docstring": "Construct a new Adadelta optimizer. Args: learning_rate: A or a floating point value. The learning rate. To match the exact form in the original paper use 1.0. rho: A or a floating point value. The decay rate. epsilon: A or a floating point value. A constant epsilon used to better conditioning the grad update. use_locking: If use locks for update operations. name: Optional name prefix for the operations created when applying gradients. 
Defaults to \"Adadelta\".", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\adadelta.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:learning_rate arg:rho arg:epsilon arg:use_locking arg:name Assign Assign Assign Assign Assign Assign" - }, - { - "library": "authlib", - "name": "fetch_token", - "source_code": "def fetch_token(self, url = None, body = '', method = 'POST', headers = None, auth = None, grant_type = None, state = None, **kwargs): state = state or self.state authorization_response = kwargs.pop('authorization_response', None) if authorization_response and '#' in authorization_response: return self.token_from_fragment(authorization_response, state) session_kwargs = self._extract_session_request_params(kwargs) if authorization_response and 'code = ' in authorization_response: grant_type = 'authorization_code' params = parse_authorization_code_response(authorization_response, state = state) kwargs['code'] = params['code'] if grant_type is None: grant_type = self.metadata.get('grant_type') if grant_type is None: grant_type = _guess_grant_type(kwargs) self.metadata['grant_type'] = grant_type body = self._prepare_token_endpoint_body(body, grant_type, **kwargs) if auth is None: auth = self.client_auth(self.token_endpoint_auth_method) if headers is None: headers = DEFAULT_HEADERS if url is None: url = self.metadata.get('token_endpoint') return self._fetch_token(url, body = body, auth = auth, method = method, headers = headers, **session_kwargs)", - "docstring": "Generic method for fetching an access token from the token endpoint. :param url: Access Token endpoint URL, if not configured, `OAuth2Token` object (a dict too).", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\client.py", - "ast_data": "FunctionDef name:fetch_token arguments arg:self arg:url arg:body arg:method arg:headers arg:auth arg:grant_type arg:state kwarg:kwargs Assign BoolOp Assign Call call:pop If BoolOp Compare op:In Return return:yes Assign Call call:_extract_session_request_params If BoolOp Compare op:In Assign Assign Call call:parse_authorization_code_response Assign If Compare op:Is Assign Call call:get If Compare op:Is Assign Call call:_guess_grant_type Assign Assign Call call:_prepare_token_endpoint_body If Compare op:Is Assign Call call:client_auth If Compare op:Is Assign If Compare op:Is Assign Call call:get Return return:yes" - }, - { - "library": "pytorch", - "name": "add_custom_scalars", - "source_code": "def add_custom_scalars(self, layout): torch._C._log_api_usage_once('tensorboard.logging.add_custom_scalars') self._get_file_writer().add_summary(custom_scalars(layout))", - "docstring": "Create special chart by collecting charts tags in 'scalars'. NOTE: This function can only be called once for each SummaryWriter() object. Because it only provides metadata to tensorboard, the function can be called before or after the training loop. Args: layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary {chartName: *ListOfProperties*}. The first element in *ListOfProperties* is the chart's type (one of **Multiline** or **Margin**) and the second element should be a list containing the tags you have used in add_scalar function, which will be collected into the new chart. 
Examples:: layout = {'Taiwan':{'twse':['Multiline',['twse/0050', 'twse/2330']]}, 'USA':{ 'dow':['Margin', ['dow/aaa', 'dow/bbb', 'dow/ccc']], 'nasdaq':['Margin', ['nasdaq/aaa', 'nasdaq/bbb', 'nasdaq/ccc']]}} writer.add_custom_scalars(layout)", - "type": "method", - "file_path": "pytorch\\torch\\utils\\tensorboard\\writer.py", - "ast_data": "FunctionDef name:add_custom_scalars arguments arg:self arg:layout" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, byminute = None, interval = 1, tz = None): if byminute is None: byminute = range(60) rule = rrulewrapper(MINUTELY, byminute = byminute, interval = interval, bysecond = 0) super().__init__(rule, tz = tz)", - "docstring": "Parameters ---------- byminute : int or list of int, default: all minutes Ticks will be placed on every minute in *byminute*. Default is `~datetime.tzinfotimezonedateutil.tz`.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\dates.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:byminute arg:interval arg:tz If Compare op:Is Assign Call call:range Assign Call call:rrulewrapper" - }, - { - "library": "django", - "name": "add_prefix", - "source_code": "def add_prefix(self, field_name): return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name", - "docstring": "Return the field name with a prefix appended, if this Form has a prefix set. Subclasses may wish to override.", - "type": "method", - "file_path": "django\\django\\forms\\forms.py", - "ast_data": "FunctionDef name:add_prefix arguments arg:self arg:field_name Return return:yes" - }, - { - "library": "coconut", - "name": "run_with_stack_size", - "source_code": "def run_with_stack_size(stack_kbs, func, *args, **kwargs): if stack_kbs < min_stack_size_kbs: raise CoconutException('--stack-size must be at least ' + str(min_stack_size_kbs) + ' KB') old_stack_size = threading.stack_size(stack_kbs * kilobyte) out = [] thread = threading.Thread(target = lambda *args, **kwargs: out.append(func(*args, **kwargs)), args = args, kwargs = kwargs) thread.start() thread.join() logger.log('Stack size used: ', old_stack_size, '->', stack_kbs * kilobyte) internal_assert(len(out) == 1, 'invalid threading results', out) return out[0]", - "docstring": "Run the given function with a stack of the given size in KBs.", - "type": "function", - "file_path": "coconut\\coconut\\command\\util.py", - "ast_data": "FunctionDef name:run_with_stack_size arguments arg:stack_kbs arg:func vararg:args kwarg:kwargs If Compare op:Lt Raise raises:CoconutException('--stack-size must be at least ' + str(min_stack_size_kbs) + ' KB') Assign Call call:stack_size Assign Assign Call call:Thread Return return:yes" - }, - { - "library": "tensorflow", - "name": "constant_value", - "source_code": "def constant_value(pred): if isinstance(pred, int): if pred == 1: pred = True elif pred == 0: pred = False if isinstance(pred, variables.Variable): return None return smart_module.smart_constant_value(pred)", - "docstring": "Return the bool value for , or None if had a dynamic value. Args: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. Returns: True or False if has a constant boolean value, None otherwise. 
Raises: TypeError: If is not a Variable, Tensor or bool, or Python integer 1 or 0.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py", - "ast_data": "FunctionDef name:constant_value arguments arg:pred If Call call:isinstance If Compare op:Eq Assign If Compare op:Eq Assign If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "hermefromroots", - "source_code": "def hermefromroots(roots): return pu._fromroots(hermeline, hermemul, roots)", - "docstring": "Generate a HermiteE series with given roots. The function returns the coefficients of the polynomial .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n), in HermiteE form, where the :math: are the roots specified in . If a zero has multiplicity n, then it must appear in n times. For instance, if 2 is a root of multiplicity three and 3 is a root of multiplicity 2, then looks something like [2, 2, 2, 3, 3]. The roots can appear in any order. If the returned coefficients are , then .. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x) The coefficient of the last term is not generally 1 for monic polynomials in HermiteE form. Parameters ---------- roots : array_like Sequence containing the roots. Returns ------- out : ndarray 1-D array of coefficients. If all roots are real then is a real array, if some of the roots are complex, then is complex even if all the coefficients in the result are real (see Examples below). See Also -------- numpy.polynomial.polynomial.polyfromroots numpy.polynomial.legendre.legfromroots numpy.polynomial.laguerre.lagfromroots numpy.polynomial.hermite.hermfromroots numpy.polynomial.chebyshev.chebfromroots Examples -------- >>> from numpy.polynomial.hermite_e import hermefromroots, hermeval >>> coef = hermefromroots((-1, 0, 1)) >>> hermeval((-1, 0, 1), coef) array([0., 0., 0.]) >>> coef = hermefromroots((-1j, 1j)) >>> hermeval((-1j, 1j), coef) array([0.+0.j, 0.+0.j])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", - "ast_data": "FunctionDef name:hermefromroots arguments arg:roots Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_node_io_shapes", - "source_code": "def get_node_io_shapes(node, key): out_shape = [] for shape in node.attr[key].list.shape: out_shape.append([dim.size for dim in shape.dim]) return out_shape", - "docstring": "Returns the input/output shapes of a GraphDef Node.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\compiler\\tensorrt\\utils.py", - "ast_data": "FunctionDef name:get_node_io_shapes arguments arg:node arg:key Assign For Return return:yes" - }, - { - "library": "mongo", - "name": "read_pref_mode_from_name", - "source_code": "def read_pref_mode_from_name(name: str) -> int: return _MONGOS_MODES.index(name)", - "docstring": "Get the read preference mode from mongos/uri name.", - "type": "function", - "file_path": "mongo\\pymongo\\read_preferences.py", - "ast_data": "FunctionDef name:read_pref_mode_from_name arguments arg:name type:str Return return:yes" - }, - { - "library": "numpy", - "name": "have_f77c", - "source_code": "def have_f77c(self): simple_fortran_subroutine = '\\n subroutine simple\\n end\\n ' config_cmd = self.get_config_cmd() flag = config_cmd.try_compile(simple_fortran_subroutine, lang = 'f77') return flag", - "docstring": "Check for availability of Fortran 77 compiler. Use it inside source generating function to ensure that setup distribution instance has been initialized. 
Notes ----- True if a Fortran 77 compiler is available (because a simple Fortran 77 code was able to be compiled successfully).", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\misc_util.py", - "ast_data": "FunctionDef name:have_f77c arguments arg:self Assign Assign Call call:get_config_cmd Assign Call call:try_compile Return return:yes" - }, - { - "library": "pytorch", - "name": "PerTensor", - "source_code": "@dataclass(frozen = True) class PerTensor(Granularity): pass", - "docstring": "Represents per-tensor granularity in quantization. This granularity type calculates the quantization parameters based off the entire tensor.", - "type": "class", - "file_path": "pytorch\\torch\\ao\\quantization\\observer.py", - "ast_data": "ClassDef name:PerTensor Call call:dataclass" - }, - { - "library": "matplotlib", - "name": "get_picker", - "source_code": "def get_picker(self): return self._picker", - "docstring": "Return the picking behavior of the artist. The possible values are described in . See Also -------- .Artist.set_picker, .Artist.pickable, .Artist.pick", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\artist.py", - "ast_data": "FunctionDef name:get_picker arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "sparse_reduce_max_sparse", - "source_code": "@tf_export(v1 = ['sparse.reduce_max_sparse', 'sparse_reduce_max_sparse']) @deprecation.deprecated_endpoints('sparse_reduce_max_sparse') @deprecation.deprecated_args(None, 'keep_dims is deprecated, use keepdims instead', 'keep_dims') def sparse_reduce_max_sparse(sp_input, axis = None, keepdims = None, reduction_axes = None, keep_dims = None): keepdims = deprecation.deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims) axis = deprecation.deprecated_argument_lookup('axis', axis, 'reduction_axes', reduction_axes) if keepdims is None: keepdims = False output_ind, output_val, output_shape = gen_sparse_ops.sparse_reduce_max_sparse(sp_input.indices, sp_input.values, sp_input.dense_shape, math_ops._ReductionDims(sp_input, axis), keepdims) return sparse_tensor.SparseTensor(output_ind, output_val, output_shape)", - "docstring": "Computes the max of elements across dimensions of a SparseTensor. This Op takes a SparseTensor and is the sparse counterpart to . In contrast to SparseReduceSum, this Op returns a SparseTensor. Note: A gradient is not defined for this function, so it can't be used in training models that need gradient descent. Reduces along the dimensions given in . Unless is true, the rank of the tensor is reduced by 1 for each entry in . If is true, the reduced dimensions are retained with length 1. If has no entries, all dimensions are reduced, and a tensor with a single element is returned. Additionally, the axes can be negative, which are interpreted according to the indexing rules in Python. Args: sp_input: The SparseTensor to reduce. Should have numeric type. axis: The dimensions to reduce; list or scalar. If (the default), reduces all dimensions. keepdims: If true, retain reduced dimensions with length 1. reduction_axes: Deprecated name of axis. keep_dims: Deprecated alias for . 
Returns: The reduced SparseTensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", - "ast_data": "FunctionDef name:sparse_reduce_max_sparse arguments arg:sp_input arg:axis arg:keepdims arg:reduction_axes arg:keep_dims Call call:tf_export Call call:deprecated_endpoints Call call:deprecated_args Assign Call call:deprecated_argument_lookup Assign Call call:deprecated_argument_lookup If Compare op:Is Assign Assign Call call:sparse_reduce_max_sparse Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict_proba", - "source_code": "def predict_proba(self, X): check_is_fitted(self) n_classes = self.n_classes_ if n_classes == 1: return np.ones((_num_samples(X), 1)) decision = self.decision_function(X) return self._compute_proba_from_decision(decision, n_classes)", - "docstring": "Predict class probabilities for X. The predicted class probabilities of an input sample is computed as the weighted mean predicted class probabilities of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. Returns ------- p : ndarray of shape (n_samples, n_classes) The class probabilities of the input samples. The order of outputs is the same of that of the :term: attribute.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py", - "ast_data": "FunctionDef name:predict_proba arguments arg:self arg:X Assign If Compare op:Eq Return return:yes Assign Call call:decision_function Return return:yes" - }, - { - "library": "tensorflow", - "name": "abs", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def abs(x): return math_ops.abs(x)", - "docstring": "Element-wise absolute value. Args: x: Tensor or variable. 
Returns: A tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:abs arguments arg:x Return return:yes" - }, - { - "library": "feincms", - "name": "items", - "source_code": "def items(self): base_qs = self.queryset if callable(base_qs): base_qs = base_qs() self.max_depth = base_qs.aggregate(Max('level'))['level__max'] or 0 if self.depth_cutoff > 0: self.max_depth = min(self.depth_cutoff, self.max_depth) qs = base_qs.filter(redirect_to = '') if self.filter: qs = self.filter(qs) if self.navigation_only: qs = qs.filter(in_navigation = True) if self.depth_cutoff > 0: qs = qs.filter(level__lte = self.max_depth - 1) pages = [p for p in qs if p.is_active()] if self.extended_navigation: for idx, page in enumerate(pages): if self.depth_cutoff > 0 and page.level = = self.max_depth: continue if getattr(page, 'navigation_extension', None): cnt = 0 for p in page.extended_navigation(): depth_too_deep = self.depth_cutoff > 0 and p.level > self.depth_cutoff not_in_nav = self.navigation_only and (not p.in_navigation) if depth_too_deep or not_in_nav: continue cnt + = 1 pages.insert(idx + cnt, p) if p.level > self.max_depth: self.max_depth = p.level self.per_level = 1.0 / (self.max_depth + 1.0) return pages", - "docstring": "Consider all pages that are active and that are not a redirect", - "type": "method", - "file_path": "feincms\\feincms\\module\\page\\sitemap.py", - "ast_data": "FunctionDef name:items arguments arg:self Assign If Call call:callable Assign Call call:base_qs Assign BoolOp If Compare op:Gt Assign Call call:min Assign Call call:filter If Assign Call call:filter If Assign Call call:filter If Compare op:Gt Assign Call call:filter Assign If For Call call:enumerate If BoolOp Compare op:Gt Compare op:Eq If Call call:getattr Assign For Call call:extended_navigation Assign BoolOp Compare op:Gt Compare op:Gt Assign BoolOp If BoolOp If Compare op:Gt Assign Assign Return return:yes" - }, - { - "library": "seaborn", - "name": "to_utf8", - "source_code": "def to_utf8(obj): if isinstance(obj, str): return obj try: return obj.decode(encoding = 'utf-8') except AttributeError: return str(obj)", - "docstring": "Return a string representing a Python object. Strings (i.e. type ``", - "type": "function", - "file_path": "seaborn\\seaborn\\utils.py", - "ast_data": "FunctionDef name:to_utf8 arguments arg:obj If Call call:isinstance Return return:yes Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "Future", - "source_code": "class Future(torch._C.Future, Generic[T], metaclass = _PyFutureMeta): def __init__(self, *, devices: Optional[list[Union[int, str, torch.device]]] = None): if devices is None: devices = [] super().__init__([torch.device(d) for d in devices]) def done(self) -> bool: return super().done() def wait(self) -> T: return super().wait() def value(self) -> T: return super().value() def then(self, callback: Callable[[Future[T]], S]) -> Future[S]: return cast(Future[S], super().then(callback)) def add_done_callback(self, callback: Callable[[Future[T]], None]) -> None: super().add_done_callback(callback) def set_result(self, result: T) -> None: super().set_result(result) def set_exception(self, result: T) -> None: assert isinstance(result, Exception), f'{result} is of type {type(result)}, not an Exception.' 
def raise_error(fut_result): raise fut_result super()._set_unwrap_func(raise_error) self.set_result(result)", - "docstring": "Wrapper around a `~torch.distributed.rpc.rpc_async`. It also exposes a set of APIs to add callback functions and set results. .. warning:: GPU support is a beta feature, subject to changes.", - "type": "class", - "file_path": "pytorch\\torch\\futures\\__init__.py", - "ast_data": "ClassDef name:Future FunctionDef name:__init__ arguments arg:self If Compare op:Is Assign FunctionDef name:done arguments arg:self Return return:yes FunctionDef name:wait arguments arg:self Return return:yes FunctionDef name:value arguments arg:self Return return:yes FunctionDef name:then arguments arg:self arg:callback type:Callable[[Future[T]], S] Return return:yes FunctionDef name:add_done_callback arguments arg:self arg:callback type:Callable[[Future[T]], None] FunctionDef name:set_result arguments arg:self arg:result type:T FunctionDef name:set_exception arguments arg:self arg:result type:T FunctionDef name:raise_error arguments arg:fut_result Raise raises:fut_result" - }, - { - "library": "mongo", - "name": "ConnectionCheckedOutEvent", - "source_code": "class ConnectionCheckedOutEvent(_ConnectionDurationEvent): __slots__ = ()", - "docstring": "Published when the driver successfully checks out a connection. :param address: The address (host, port) pair of the server this Connection is attempting to connect to. :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9", - "type": "class", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "ClassDef name:ConnectionCheckedOutEvent Assign" - }, - { - "library": "kornia", - "name": "make_samplers", - "source_code": "def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None: gain = _range_bound(self.gain, 'gain').to(device, dtype) self.gain_sampler = UniformDistribution(gain[0], gain[1], validate_args = False) sign = _range_bound(self.sign, 'sign', bounds = (-1.0, 1.0), center = 0.0).to(device, dtype) self.sign_sampler = UniformDistribution(sign[0], sign[1], validate_args = False) self.directions_sampler = UniformDistribution(0, 4, validate_args = False)", - "docstring": "Create samplers for generating random gaussian illumination parameters.", - "type": "method", - "file_path": "kornia\\kornia\\augmentation\\random_generator\\_2d\\linear_illumination.py", - "ast_data": "FunctionDef name:make_samplers arguments arg:self arg:device type:torch.device arg:dtype type:torch.dtype Assign Call call:to Assign Call call:UniformDistribution Assign Call call:to Assign Call call:UniformDistribution Assign Call call:UniformDistribution" - }, - { - "library": "numpy", - "name": "feature_extra_checks", - "source_code": "@_Cache.me def feature_extra_checks(self, name): assert isinstance(name, str) d = self.feature_supported[name] extra_checks = d.get('extra_checks', []) if not extra_checks: return [] self.dist_log(\"Testing extra checks for feature '%s'\" % name, extra_checks) flags = self.feature_flags(name) available = [] not_available = [] for chk in extra_checks: test_path = os.path.join(self.conf_check_path, 'extra_%s.c' % chk.lower()) if not os.path.exists(test_path): self.dist_fatal('extra check file does not exist', test_path) is_supported = self.dist_test(test_path, flags + self.cc_flags['werror']) if is_supported: available.append(chk) else: not_available.append(chk) if not_available: self.dist_log('testing failed for checks', not_available, stderr = True) return available", - 
"docstring": "Return a list of supported extra checks after testing them against the compiler. Parameters ---------- names : str CPU feature name in uppercase.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", - "ast_data": "FunctionDef name:feature_extra_checks arguments arg:self arg:name Assign Assign Call call:get If Return return:yes Assign Call call:feature_flags Assign Assign For Assign Call call:join If Assign Call call:dist_test If If Return return:yes" - }, - { - "library": "pytorch", - "name": "create_python_return_type_bindings_header", - "source_code": "def create_python_return_type_bindings_header(fm: FileManager, pairs: Sequence[PythonSignatureNativeFunctionPair], pred: Callable[[NativeFunction], bool], filename: str) -> None: py_return_types_declarations: list[str] = [] grouped = group_filter_overloads(pairs, pred) for name in sorted(grouped.keys(), key = str): overloads = grouped[name] declarations = generate_return_type_declarations(overloads) py_return_types_declarations.append('' if not declarations else '\\n'.join(declarations)) fm.write_with_template(filename, filename, lambda: {'generated_comment': '@' + f'generated from {fm.template_dir_for_comments()}/{filename}', 'py_return_types_declarations': py_return_types_declarations})", - "docstring": "Generate function to initialize and return named tuple for native functions which returns named tuple and relevant entry for the map in .", - "type": "function", - "file_path": "pytorch\\tools\\autograd\\gen_python_functions.py", - "ast_data": "FunctionDef name:create_python_return_type_bindings_header arguments arg:fm type:FileManager arg:pairs type:Sequence[PythonSignatureNativeFunctionPair] arg:pred type:Callable[[NativeFunction], bool] arg:filename type:str Assign Call call:group_filter_overloads For Call call:sorted Assign Assign Call call:generate_return_type_declarations" - }, - { - "library": "pytorch", - "name": "validate_constraints", - "source_code": "def validate_constraints(self): if self._validated: return for constraint in self.constraints: _validate_pass_schedule_constraint(constraint, self.passes) self._validated = True", - "docstring": "Validates that current pass schedule defined by is valid according to all constraints in", - "type": "method", - "file_path": "pytorch\\torch\\fx\\passes\\infra\\pass_manager.py", - "ast_data": "FunctionDef name:validate_constraints arguments arg:self If Return return:no For Assign" - }, - { - "library": "pytorch", - "name": "stats", - "source_code": "def stats(self, inclusive: bool = False) -> FunctionCounts: return self.stmt_inclusive_stats if inclusive else self.stmt_exclusive_stats", - "docstring": "Returns detailed function counts. Conceptually, the FunctionCounts returned can be thought of as a tuple of (count, path_and_function_name) tuples. matches the semantics of callgrind. If True, the counts include instructions executed by children. is useful for identifying hot spots in code; is useful for reducing noise when diffing counts from two different runs. (See CallgrindStats.delta(...) 
for more details)", - "type": "method", - "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py", - "ast_data": "FunctionDef name:stats arguments arg:self arg:inclusive type:bool Return return:yes" - }, - { - "library": "coconut", - "name": "interpret", - "source_code": "def interpret(code, in_vars): try: result = eval(code, in_vars) except SyntaxError: pass else: if result is not None: logger.print(ascii(result)) return result _coconut_exec(code, in_vars)", - "docstring": "Try to evaluate the given code, otherwise execute it.", - "type": "function", - "file_path": "coconut\\coconut\\command\\util.py", - "ast_data": "FunctionDef name:interpret arguments arg:code arg:in_vars Try Assign Call call:eval ExceptHandler If Compare op:IsNot Return return:yes" - }, - { - "library": "django", - "name": "redirect_to_login", - "source_code": "def redirect_to_login(next, login_url = None, redirect_field_name = REDIRECT_FIELD_NAME): resolved_url = resolve_url(login_url or settings.LOGIN_URL) login_url_parts = list(urlsplit(resolved_url)) if redirect_field_name: querystring = QueryDict(login_url_parts[3], mutable = True) querystring[redirect_field_name] = next login_url_parts[3] = querystring.urlencode(safe = '/') return HttpResponseRedirect(urlunsplit(login_url_parts))", - "docstring": "Redirect the user to the login page, passing the given 'next' page.", - "type": "function", - "file_path": "django\\django\\contrib\\auth\\views.py", - "ast_data": "FunctionDef name:redirect_to_login arguments arg:next arg:login_url arg:redirect_field_name Assign Call call:resolve_url Assign Call call:list If Assign Call call:QueryDict Assign Assign Call call:urlencode Return return:yes" - }, - { - "library": "algorithms", - "name": "merge_set", - "source_code": "def merge_set(self, node1, node2): node1 = self.find_set(node1) node2 = self.find_set(node2) if self.size[node1] < self.size[node2]: self.parent[node1] = node2 self.size[node2] + = self.size[node1] else: self.parent[node2] = node1 self.size[node1] + = self.size[node2]", - "docstring": "Args: node1, node2 (int): Indexes of nodes whose sets will be merged.", - "type": "method", - "file_path": "algorithms\\algorithms\\graph\\minimum_spanning_tree.py", - "ast_data": "FunctionDef name:merge_set arguments arg:self arg:node1 arg:node2 Assign Call call:find_set Assign Call call:find_set If Compare op:Lt Assign Assign" - }, - { - "library": "pytorch", - "name": "hpu", - "source_code": "def hpu(self, device = None, non_blocking = False) -> Union[_StorageBase, TypedStorage]: device2 = torch.device('hpu', device) if device else torch.device('hpu') return self.to(device = device2, non_blocking = non_blocking)", - "docstring": "Returns a copy of this object in HPU memory. If this object is already in HPU memory and on the correct device, then no copy is performed and the original object is returned. Args: device (int): The destination HPU id. Defaults to the current device. non_blocking (bool): If `` and the source is in pinned memory, the copy will be asynchronous with respect to the host. 
Otherwise, the argument has no effect.", - "type": "method", - "file_path": "pytorch\\torch\\storage.py", - "ast_data": "FunctionDef name:hpu arguments arg:self arg:device arg:non_blocking Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "metadata", - "source_code": "def metadata(self) -> ShardedTensorMetadata: return self._metadata", - "docstring": "Returns a :class: object corresponding to the metadata for the entire tensor.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\api.py", - "ast_data": "FunctionDef name:metadata arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_loss_reduction", - "source_code": "@tf_export(v1 = ['distribute.get_loss_reduction']) def get_loss_reduction(): if not distribute_lib.get_strategy()._scale_loss_for_estimator: return ReduceOp.SUM last_reduction = ops.get_default_graph()._last_loss_reduction if last_reduction == losses_impl.Reduction.SUM or last_reduction == 'sum': return ReduceOp.SUM return ReduceOp.MEAN", - "docstring": "corresponding to the last loss reduction. Returns: corresponding to the last loss reduction for estimator and v1 optimizer use case. otherwise.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py", - "ast_data": "FunctionDef name:get_loss_reduction arguments Call call:tf_export If Return return:yes Assign If BoolOp Compare op:Eq Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "algorithms", - "name": "is_match", - "source_code": "def is_match(str_a, str_b): len_a, len_b = (len(str_a) + 1, len(str_b) + 1) matches = [[False] * len_b for _ in range(len_a)] matches[0][0] = True for i, element in enumerate(str_b[1:], 2): matches[0][i] = matches[0][i - 2] and element == '*' for i, char_a in enumerate(str_a, 1): for j, char_b in enumerate(str_b, 1): if char_b != '*': matches[i][j] = matches[i - 1][j - 1] and char_b in (char_a, '.') else: matches[i][j] |= matches[i][j - 2] if char_a == str_b[j - 2] or str_b[j - 2] == '.': matches[i][j] |= matches[i - 1][j] return matches[-1][-1]", - "docstring": "Finds if matches Keyword arguments: str_a -- string str_b -- string", - "type": "function", - "file_path": "algorithms\\algorithms\\dp\\regex_matching.py", - "ast_data": "FunctionDef name:is_match arguments arg:str_a arg:str_b Assign Assign Assign For Call call:enumerate Assign BoolOp Compare op:Eq For Call call:enumerate For Call call:enumerate If Compare op:NotEq Assign BoolOp Compare op:In If BoolOp Compare op:Eq Compare op:Eq Return return:yes" - }, - { - "library": "django", - "name": "equals_identical", - "source_code": "def equals_identical(self, other): if geos_version_tuple() < (3, 12): raise GEOSException('GEOSGeometry.equals_identical() requires GEOS >= 3.12.0.') return capi.geos_equalsidentical(self.ptr, other.ptr)", - "docstring": "Return true if the two Geometries are point-wise equivalent.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:equals_identical arguments arg:self arg:other If Compare op:Lt Raise raises:GEOSException('GEOSGeometry.equals_identical() requires GEOS >= 3.12.0.') Return return:yes" - }, - { - "library": "scrapy", - "name": "get_response", - "source_code": "def get_response(self) -> Deferred[Response]: return self._deferred_response", - "docstring": "Simply return a Deferred which fires when response from the asynchronous request is available", - "type": "method", - "file_path": "scrapy\\scrapy\\core\\http2\\stream.py", - "ast_data": "FunctionDef name:get_response arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "getrow", - "source_code": "def getrow(self, i): return self._getrow(i)", - "docstring": "Returns a copy of row i of the matrix, as a (1 x n) sparse matrix (row vector).", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_matrix.py", - "ast_data": "FunctionDef name:getrow arguments arg:self arg:i Return return:yes" - }, - { - "library": "tensorflow", - "name": "empty", - "source_code": "@staticmethod def empty(element_spec): return _OptionalImpl(gen_optional_ops.optional_none(), element_spec)", - "docstring": "Returns an that has no value. NOTE: This method takes an argument that defines the structure of the value that would be contained in the returned if it had a value. >>> optional = tf.experimental.Optional.empty( ... tf.TensorSpec(shape=(), dtype=tf.int32, name=None)) >>> print(optional.has_value()) tf.Tensor(False, shape=(), dtype=bool) Args: element_spec: A (nested) structure of objects matching the structure of an element of this optional. 
Returns: A with no value.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\optional_ops.py", - "ast_data": "FunctionDef name:empty arguments arg:element_spec Return return:yes" - }, - { - "library": "scipy", - "name": "integers", - "source_code": "def integers(self, l_bounds: 'npt.ArrayLike', *, u_bounds: 'npt.ArrayLike | None' = None, n: IntNumber = 1, endpoint: bool = False, workers: IntNumber = 1) -> np.ndarray: if u_bounds is None: u_bounds = l_bounds l_bounds = 0 u_bounds = np.atleast_1d(u_bounds) l_bounds = np.atleast_1d(l_bounds) if endpoint: u_bounds = u_bounds + 1 if not np.issubdtype(l_bounds.dtype, np.integer) or not np.issubdtype(u_bounds.dtype, np.integer): message = \"'u_bounds' and 'l_bounds' must be integers or array-like of integers\" raise ValueError(message) if isinstance(self, Halton): sample = self.random(n = n, workers = workers) else: sample = self.random(n = n) sample = scale(sample, l_bounds = l_bounds, u_bounds = u_bounds) sample = np.floor(sample).astype(np.int64) return sample", - "docstring": "Draw integers from (inclusive) to (exclusive), or if endpoint=True, (inclusive) to (inclusive). Parameters ---------- l_bounds : int or array-like of ints Lowest (signed) integers to be drawn (unless `u_boundsHalton[0, 1)[a, b), b>aab` the upper bounds, the following transformation is used: .. math:: \\text{floor}((b - a) \\cdot \\text{sample} + a)", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_qmc.py", - "ast_data": "FunctionDef name:integers arguments arg:self arg:l_bounds type:'npt.ArrayLike' If Compare op:Is Assign Assign Assign Call call:atleast_1d Assign Call call:atleast_1d If Assign If BoolOp Assign Raise raises:ValueError(message) If Call call:isinstance Assign Call call:random Assign Call call:random Assign Call call:scale Assign Call call:astype Return return:yes" - }, - { - "library": "tensorflow", - "name": "broadcast_static_shape", - "source_code": "@tf_export('broadcast_static_shape') @dispatch.add_dispatch_support def broadcast_static_shape(shape_x, shape_y): return common_shapes.broadcast_shape(shape_x, shape_y)", - "docstring": "Computes the shape of a broadcast given known shapes. When and are fully known s this computes a which is the shape of the result of a broadcasting op applied in tensors of shapes and . For example, if shape_x is and shape_y is , the result is a TensorShape whose value is . This is useful when validating the result of a broadcasting operation when the tensors have statically known shapes. Example: >>> shape_x = tf.TensorShape([1, 2, 3]) >>> shape_y = tf.TensorShape([5, 1 ,3]) >>> tf.broadcast_static_shape(shape_x, shape_y) TensorShape([5, 2, 3]) Args: shape_x: A shape_y: A Returns: A representing the broadcasted shape. 
Raises: ValueError: If the two shapes can not be broadcasted.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", - "ast_data": "FunctionDef name:broadcast_static_shape arguments arg:shape_x arg:shape_y Call call:tf_export Return return:yes" - }, - { - "library": "kornia", - "name": "ImageModule", - "source_code": "class ImageModule(Module, ImageModuleMixIn, ONNXExportMixin): def __init__(self, *args: Any, **kwargs: Any) -> None: super().__init__(*args, **kwargs) self._disable_features: bool = False @property def disable_features(self) -> bool: return self._disable_features @disable_features.setter def disable_features(self, value: bool = True) -> None: self._disable_features = value def __call__(self, *inputs: Any, input_names_to_handle: Optional[list[Any]] = None, output_type: str = 'tensor', **kwargs: Any) -> Any: if not self._disable_features: decorated_forward = self.convert_input_output(input_names_to_handle = input_names_to_handle, output_type = output_type)(super().__call__) _output_image = decorated_forward(*inputs, **kwargs) if output_type == 'tensor': self._output_image = self._detach_tensor_to_cpu(_output_image) else: self._output_image = _output_image else: _output_image = super().__call__(*inputs, **kwargs) return _output_image", - "docstring": "Handles image-based operations. This modules accepts multiple input and output data types, provides end-to-end visualization, file saving features. Note that this module fits the classes that return one image tensor only. Note: The additional add-on features increase the use of memories. To restore the original behaviour, you may set .", - "type": "class", - "file_path": "kornia\\kornia\\core\\module.py", - "ast_data": "ClassDef name:ImageModule FunctionDef name:__init__ arguments arg:self vararg:args kwarg:kwargs FunctionDef name:disable_features arguments arg:self Return return:yes FunctionDef name:disable_features arguments arg:self arg:value type:bool Assign FunctionDef name:__call__ arguments arg:self vararg:inputs kwarg:kwargs If Assign Call Assign Call call:decorated_forward If Compare op:Eq Assign Call call:_detach_tensor_to_cpu Assign Assign Call call:__call__ Return return:yes" - }, - { - "library": "django", - "name": "overlaps", - "source_code": "def overlaps(self, other): return capi.geos_overlaps(self.ptr, other.ptr)", - "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is T*T***T** (for two points or two surfaces) 1*T***T** (for two curves).", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:overlaps arguments arg:self arg:other Return return:yes" - }, - { - "library": "pytorch", - "name": "gather", - "source_code": "@_exception_logger def gather(tensor: torch.Tensor, gather_list: Optional[list[torch.Tensor]] = None, dst: Optional[int] = None, group: Optional[ProcessGroup] = None, async_op: bool = False, group_dst: Optional[int] = None): _check_single_tensor(tensor, 'tensor') if gather_list: _check_tensor_list(gather_list, 'gather_list') else: gather_list = [] _ensure_all_tensors_same_dtype(tensor, gather_list) group = _group_or_default_group(group) if _rank_not_in_group(group): _warn_not_in_group('gather') return if dst is None and group_dst is None: dst = 0 group_dst = _canonicalize_group_rank(group, dst, group_dst, return_global = False) my_group_rank = group.rank() _validate_output_list_for_rank(my_group_rank, group_dst, gather_list) output_tensors = [gather_list] if group_dst == my_group_rank else [] input_tensors = [tensor] opts = GatherOptions() opts.rootRank = group_dst opts.asyncOp = async_op work = group.gather(output_tensors, input_tensors, opts) if async_op: return work elif work is not None: work.wait()", - "docstring": "Gathers a list of tensors in a single process. This function requires all tensors to be the same size on each process. Args: tensor (Tensor): Input tensor. gather_list (list[Tensor], optional): List of appropriately, same-sized tensors to use for gathered data (default is None, must be specified on the destination rank) dst (int, optional): Destination rank on global process group (regardless of `` Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group .. note:: Note that all Tensors in gather_list must have the same size. Example:: >>> # xdoctest: +SKIP(\"no rank\") >>> # We have 2 process groups, 2 ranks. >>> tensor_size = 2 >>> device = torch.device(f'cuda:{rank}') >>> tensor = torch.ones(tensor_size, device=device) + rank >>> if dist.get_rank() == 0: >>> gather_list = [torch.zeros_like(tensor, device=device) for i in range(2)] >>> else: >>> gather_list = None >>> dist.gather(tensor, gather_list, dst=0) >>> # Rank 0 gets gathered data. >>> gather_list [tensor([1., 1.], device='cuda:0'), tensor([2., 2.], device='cuda:0')] # Rank 0 None # Rank 1", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:gather arguments arg:tensor type:torch.Tensor arg:gather_list type:Optional[list[torch.Tensor]] arg:dst type:Optional[int] arg:group type:Optional[ProcessGroup] arg:async_op type:bool arg:group_dst type:Optional[int] If Assign Assign Call call:_group_or_default_group If Call call:_rank_not_in_group Return return:no If BoolOp Compare op:Is Compare op:Is Assign Assign Call call:_canonicalize_group_rank Assign Call call:rank Assign Assign Assign Call call:GatherOptions Assign Assign Assign Call call:gather If Return return:yes If Compare op:IsNot" - }, - { - "library": "pytorch", - "name": "register_log", - "source_code": "def register_log(setting_name, log_name): log_registry.register_log(setting_name, log_name)", - "docstring": "Enables a log to be controlled by the env var and user API with the setting_name Args: setting_name: the shorthand name used in the env var and user API log_name: the log name that the setting_name is associated with", - "type": "function", - "file_path": "pytorch\\torch\\_logging\\_internal.py", - "ast_data": "FunctionDef name:register_log arguments arg:setting_name arg:log_name" - }, - { - "library": "tensorflow", - "name": "base_dtype", - "source_code": "def base_dtype(dtype): dtype = dtypes.as_dtype(dtype) if hasattr(dtype, 'base_dtype'): return dtype.base_dtype return dtype", - "docstring": "Returns a non-reference based on this .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py", - "ast_data": "FunctionDef name:base_dtype arguments arg:dtype Assign Call call:as_dtype If Call call:hasattr Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "create_default_global_save_plan", - "source_code": "def create_default_global_save_plan(all_plans: list[SavePlan], rewrite_index_hints: bool = True) -> tuple[list[SavePlan], Metadata]: md: dict[str, STORAGE_TYPES] = {} new_plans = [] for plan in all_plans: new_items = [] for item in plan.items: if not item.type == WriteItemType.SHARD: assert item.index.fqn not
in md if item.type = = WriteItemType.BYTE_IO: md[item.index.fqn] = BytesStorageMetadata() new_items.append(item) else: assert item.tensor_data is not None tensor_md = cast(TensorStorageMetadata, md.setdefault(item.index.fqn, TensorStorageMetadata(properties = item.tensor_data.properties, size = item.tensor_data.size, chunks = []))) new_item = item if rewrite_index_hints: new_index = dataclasses.replace(item.index, index = len(tensor_md.chunks)) new_item = dataclasses.replace(item, index = new_index) new_items.append(new_item) assert item.tensor_data.chunk is not None, f'\\n Cannot create MD for tensor without bounds.\\n FQN: {item.index.fqn}\\n ' tensor_md.chunks.append(item.tensor_data.chunk) new_plans.append(dataclasses.replace(plan, items = new_items)) return (new_plans, Metadata(md))", - "docstring": "Create the global plan and metadata used by DefaultSavePlanner. Metadata is produced by concatenating the metadata of all `` is True.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py", - "ast_data": "FunctionDef name:create_default_global_save_plan arguments arg:all_plans type:list[SavePlan] arg:rewrite_index_hints type:bool Assign For Assign For If If Compare op:Eq Assign Call call:BytesStorageMetadata Assign Call call:cast Assign If Assign Call call:replace Assign Call call:replace Return return:yes" - }, - { - "library": "pytorch", - "name": "Tanhshrink", - "source_code": "class Tanhshrink(Module): def forward(self, input: Tensor) -> Tensor: return F.tanhshrink(input)", - "docstring": "Applies the element-wise Tanhshrink function. .. math:: \\text{Tanhshrink}(x) = x - \\tanh(x) Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/Tanhshrink.png Examples:: >>> m = nn.Tanhshrink() >>> input = torch.randn(2) >>> output = m(input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\activation.py", - "ast_data": "ClassDef name:Tanhshrink FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "kornia", - "name": "get_transformation_matrix", - "source_code": "def get_transformation_matrix(self, input: Tensor, params: Optional[Dict[str, Tensor]] = None, flags: Optional[Dict[str, Any]] = None) -> Tensor: flags = self.flags if flags is None else flags if params is not None: transform = self.generate_transformation_matrix(input, params, flags) elif self.transform_matrix is None: params = self.forward_parameters(input.shape) transform = self.generate_transformation_matrix(input, params, flags) else: transform = self.transform_matrix return as_tensor(transform, device = input.device, dtype = input.dtype)", - "docstring": "Obtain transformation matrices. Return the current transformation matrix if existed. 
Generate a new one, otherwise.", - "type": "method", - "file_path": "kornia\\kornia\\augmentation\\_2d\\geometric\\base.py", - "ast_data": "FunctionDef name:get_transformation_matrix arguments arg:self arg:input type:Tensor arg:params type:Optional[Dict[str, Tensor]] arg:flags type:Optional[Dict[str, Any]] Assign If Compare op:IsNot Assign Call call:generate_transformation_matrix If Compare op:Is Assign Call call:forward_parameters Assign Call call:generate_transformation_matrix Assign Return return:yes" - }, - { - "library": "scrapy", - "name": "LocalWeakReferencedCache", - "source_code": "class LocalWeakReferencedCache(weakref.WeakKeyDictionary): def __init__(self, limit: int | None = None): super().__init__() self.data: LocalCache = LocalCache(limit = limit) def __setitem__(self, key: _KT, value: _VT) -> None: with contextlib.suppress(TypeError): super().__setitem__(key, value) def __getitem__(self, key: _KT) -> _VT | None: try: return super().__getitem__(key) except (TypeError, KeyError): return None", - "docstring": "A weakref.WeakKeyDictionary implementation that uses LocalCache as its underlying data structure, making it ordered and capable of being size-limited. Useful for memoization, while avoiding keeping received arguments in memory only because of the cached references. Note: like LocalCache and unlike weakref.WeakKeyDictionary, it cannot be instantiated with an initial dictionary.", - "type": "class", - "file_path": "scrapy\\scrapy\\utils\\datatypes.py", - "ast_data": "ClassDef name:LocalWeakReferencedCache FunctionDef name:__init__ arguments arg:self arg:limit type:int | None FunctionDef name:__setitem__ arguments arg:self arg:key type:_KT arg:value type:_VT With FunctionDef name:__getitem__ arguments arg:self arg:key type:_KT Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "tensorflow", - "name": "coordination_leader", - "source_code": "def coordination_leader(cluster_spec): cluster_spec = normalize_cluster_spec(cluster_spec) if not cluster_spec.as_dict(): return '' if 'ps' in cluster_spec.jobs: return '/job: ps/replica: 0/task: 0' if 'chief' in cluster_spec.jobs: return '/job: chief/replica: 0/task: 0' assert 'worker' in cluster_spec.jobs return '/job: worker/replica: 0/task: 0'", - "docstring": "Return the task name of the coordination service leader. Args: cluster_spec: a dict, or object sxpecifying the cluster configurations. 
Returns: a string indicating the task name of the coordination service leader.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\multi_worker_util.py", - "ast_data": "FunctionDef name:coordination_leader arguments arg:cluster_spec Assign Call call:normalize_cluster_spec If Return return:yes If Compare op:In Return return:yes If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "worker_name", - "source_code": "def worker_name(self) -> Optional[str]: return self._worker_name", - "docstring": "Return the name of remote worker representing the remote device and `` if no worker name is available.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\remote_device.py", - "ast_data": "FunctionDef name:worker_name arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "__len__", - "source_code": "def __len__(self): if self._dims is None: raise ValueError('Cannot take the length of shape with unknown rank.') return len(self._dims)", - "docstring": "Returns the rank of this shape, or raises ValueError if unspecified.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", - "ast_data": "FunctionDef name:__len__ arguments arg:self If Compare op:Is Raise raises:ValueError('Cannot take the length of shape with unknown rank.') Return return:yes" - }, - { - "library": "numpy", - "name": "det", - "source_code": "@array_function_dispatch(_unary_dispatcher) def det(a): a = asarray(a) _assert_stacked_square(a) t, result_t = _commonType(a) signature = 'D->D' if isComplexType(t) else 'd->d' r = _umath_linalg.det(a, signature = signature) r = r.astype(result_t, copy = False) return r", - "docstring": "Compute the determinant of an array. Parameters ---------- a : (..., M, M) array_like Input array to compute determinants for. Returns ------- det : (...) array_like Determinant of . See Also -------- slogdet : Another way to represent the determinant, more suitable for large matrices where underflow/overflow may occur. scipy.linalg.det : Similar function in SciPy. Notes ----- Broadcasting rules apply, see the documentation for details. The determinant is computed via LU factorization using the LAPACK routine ``. 
Examples -------- The determinant of a 2-D array [[a, b], [c, d]] is ad - bc: >>> import numpy as np >>> a = np.array([[1, 2], [3, 4]]) >>> np.linalg.det(a) -2.0 # may vary Computing determinants for a stack of matrices: >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ]) >>> a.shape (3, 2, 2) >>> np.linalg.det(a) array([-2., -3., -8.])", - "type": "function", - "file_path": "numpy\\numpy\\linalg\\_linalg.py", - "ast_data": "FunctionDef name:det arguments arg:a Call call:array_function_dispatch Assign Call call:asarray Assign Call call:_commonType Assign Assign Call call:det Assign Call call:astype Return return:yes" - }, - { - "library": "django", - "name": "get_paginate_orphans", - "source_code": "def get_paginate_orphans(self): return self.paginate_orphans", - "docstring": "Return the maximum number of orphans extend the last page by when paginating.", - "type": "method", - "file_path": "django\\django\\views\\generic\\list.py", - "ast_data": "FunctionDef name:get_paginate_orphans arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "append_to", - "source_code": "def append_to(self, extlib): if is_sequence(extlib): lib_name, build_info = extlib dict_append(build_info, libraries = self.libraries, include_dirs = self.include_dirs) else: from numpy.distutils.core import Extension assert isinstance(extlib, Extension), repr(extlib) extlib.libraries.extend(self.libraries) extlib.include_dirs.extend(self.include_dirs)", - "docstring": "Append libraries, include_dirs to extension or library item.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\misc_util.py", - "ast_data": "FunctionDef name:append_to arguments arg:self arg:extlib If Call call:is_sequence Assign" - }, - { - "library": "tensorflow", - "name": "wrapped_intermediates", - "source_code": "@property def wrapped_intermediates(self): return list(self._wrapped_intermediates.values())", - "docstring": "The optional-wrapped intermediates captured from the forward graph.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\cond_v2.py", - "ast_data": "FunctionDef name:wrapped_intermediates arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "units", - "source_code": "@property def units(self): units, name = (None, None) if self.projected or self.local: units, name = capi.linear_units(self.ptr, byref(c_char_p())) elif self.geographic: units, name = capi.angular_units(self.ptr, byref(c_char_p())) if name is not None: name = force_str(name) return (units, name)", - "docstring": "Return a 2-tuple of the units value and the units name. 
Automatically determine whether to return the linear or angular units.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\srs.py", - "ast_data": "FunctionDef name:units arguments arg:self Assign If BoolOp Assign Call call:linear_units If Assign Call call:angular_units If Compare op:IsNot Assign Call call:force_str Return return:yes" - }, - { - "library": "tensorflow", - "name": "name", - "source_code": "def name(self): return self._tpu", - "docstring": "Return the name of the tpu, or the ip address if name is not provided.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py", - "ast_data": "FunctionDef name:name arguments arg:self Return return:yes" - }, - { - "library": "flexx", - "name": "MyWebSocketHandler", - "source_code": "class MyWebSocketHandler: class Application: pass class IOLoop: def __init__(self, loop): self._loop = loop def spawn_callback(self, func, *args): self._loop.call_soon_threadsafe(func, *args) def __init__(self, ws): self._ws = ws self.application = MyWebSocketHandler.Application() self.application._io_loop = MyWebSocketHandler.IOLoop(manager.loop) self.cookies = {} def write_message(self, message: Union[bytes, str, Dict[str, Any]], binary: bool = False): self._ws.send(message) def close(self, code: int = None, reason: str = None) -> None: if not self._ws.closed: self._ws.close(code, reason) def ws_closed(self): self.on_close()", - "docstring": "This class is designed to mimic the tornado WebSocketHandler to allow glue in code from WSHandler.", - "type": "class", - "file_path": "flexx\\flexx\\app\\_flaskserver.py", - "ast_data": "ClassDef name:MyWebSocketHandler ClassDef name:Application ClassDef name:IOLoop FunctionDef name:__init__ arguments arg:self arg:loop Assign FunctionDef name:spawn_callback arguments arg:self arg:func vararg:args FunctionDef name:__init__ arguments arg:self arg:ws Assign Assign Call call:Application Assign Call call:IOLoop Assign FunctionDef name:write_message arguments arg:self arg:message type:Union[bytes, str, Dict[str, Any]] arg:binary type:bool FunctionDef name:close arguments arg:self arg:code type:int arg:reason type:str If FunctionDef name:ws_closed arguments arg:self" - }, - { - "library": "scikit-learn", - "name": "predict", - "source_code": "def predict(self, X): check_is_fitted(self) return self.classes_[np.argmax(self.predict_proba(X), axis = 1)]", - "docstring": "Predict the target of new samples. The predicted class is the class that has the highest probability, and can thus be different from the prediction of the uncalibrated classifier. Parameters ---------- X : array-like of shape (n_samples, n_features) The samples, as accepted by . 
Returns ------- C : ndarray of shape (n_samples,) The predicted class.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\calibration.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X Return return:yes" - }, - { - "library": "coconut", - "name": "final_indentation_level", - "source_code": "def final_indentation_level(code): level = 0 for line in literal_lines(code): leading_indent, _, trailing_indent = split_leading_trailing_indent(line) level + = ind_change(leading_indent) + ind_change(trailing_indent) return level", - "docstring": "Determine the final indentation level of the given code.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:final_indentation_level arguments arg:code Assign For Call call:literal_lines Assign Call call:split_leading_trailing_indent Return return:yes" - }, - { - "library": "pytorch", - "name": "FileOpenerIterDataPipe", - "source_code": "@functional_datapipe('open_files') class FileOpenerIterDataPipe(IterDataPipe[tuple[str, IOBase]]): def __init__(self, datapipe: Iterable[str], mode: str = 'r', encoding: Optional[str] = None, length: int = -1): super().__init__() self.datapipe: Iterable = datapipe self.mode: str = mode self.encoding: Optional[str] = encoding if self.mode not in ('b', 't', 'rb', 'rt', 'r'): raise ValueError(f'Invalid mode {mode}') if 'b' in mode and encoding is not None: raise ValueError(\"binary mode doesn't take an encoding argument\") self.length: int = length def __iter__(self): yield from get_file_binaries_from_pathnames(self.datapipe, self.mode, self.encoding) def __len__(self): if self.length = = -1: raise TypeError(f\"{type(self).__name__} instance doesn't have valid length\") return self.length", - "docstring": "Given pathnames, opens files and yield pathname and file stream in a tuple (functional name: ``. length: Nominal length of the datapipe Note: The opened file handles will be closed by Python's GC periodically. Users can choose to close them explicitly. 
Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.iter import FileLister, FileOpener, StreamReader >>> dp = FileLister(root=\".\").filter(lambda fname: fname.endswith('.txt')) >>> dp = FileOpener(dp) >>> dp = StreamReader(dp) >>> list(dp) [('./abc.txt', 'abc')]", - "type": "class", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\fileopener.py", - "ast_data": "ClassDef name:FileOpenerIterDataPipe Call call:functional_datapipe FunctionDef name:__init__ arguments arg:self arg:datapipe type:Iterable[str] arg:mode type:str arg:encoding type:Optional[str] arg:length type:int If Compare op:NotIn Raise raises:ValueError(f'Invalid mode {mode}') If BoolOp Compare op:In Compare op:IsNot Raise raises:ValueError(\"binary mode doesn't take an encoding argument\") FunctionDef name:__iter__ arguments arg:self FunctionDef name:__len__ arguments arg:self If Compare op:Eq Raise raises:TypeError(f\"{type(self).__name__} instance doesn't have valid length\") Return return:yes" - }, - { - "library": "scikit-learn", - "name": "TreeNode", - "source_code": "class TreeNode: def __init__(self, *, depth, sample_indices, partition_start, partition_stop, sum_gradients, sum_hessians, value = None): self.depth = depth self.sample_indices = sample_indices self.n_samples = sample_indices.shape[0] self.sum_gradients = sum_gradients self.sum_hessians = sum_hessians self.value = value self.is_leaf = False self.allowed_features = None self.interaction_cst_indices = None self.set_children_bounds(float('-inf'), float('+inf')) self.split_info = None self.left_child = None self.right_child = None self.histograms = None self.partition_start = partition_start self.partition_stop = partition_stop def set_children_bounds(self, lower, upper): self.children_lower_bound = lower self.children_upper_bound = upper def __lt__(self, other_node): return self.split_info.gain > other_node.split_info.gain", - "docstring": "Tree Node class used in TreeGrower. This isn't used for prediction purposes, only for training (see TreePredictor). Parameters ---------- depth : int The depth of the node, i.e. its distance from the root. sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 The indices of the samples at the node. partition_start : int start position of the node's sample_indices in splitter.partition. partition_stop : int stop position of the node's sample_indices in splitter.partition. sum_gradients : float The sum of the gradients of the samples at the node. sum_hessians : float The sum of the hessians of the samples at the node. Attributes ---------- depth : int The depth of the node, i.e. its distance from the root. sample_indices : ndarray of shape (n_samples_at_node,), dtype=np.uint32 The indices of the samples at the node. sum_gradients : float The sum of the gradients of the samples at the node. sum_hessians : float The sum of the hessians of the samples at the node. split_info : SplitInfo or None The result of the split evaluation. is_leaf : bool True if node is a leaf left_child : TreeNode or None The left child of the node. None for leaves. right_child : TreeNode or None The right child of the node. None for leaves. value : float or None The value of the leaf, as computed in finalize_leaf(). None for non-leaf nodes. partition_start : int start position of the node's sample_indices in splitter.partition. partition_stop : int stop position of the node's sample_indices in splitter.partition. allowed_features : None or ndarray, dtype=int Indices of features allowed to split for children. 
interaction_cst_indices : None or list of ints Indices of the interaction sets that have to be applied on splits of child nodes. The fewer sets the stronger the constraint as fewer sets contain fewer features. children_lower_bound : float children_upper_bound : float", - "type": "class", - "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\grower.py", - "ast_data": "ClassDef name:TreeNode FunctionDef name:__init__ arguments arg:self Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:set_children_bounds arguments arg:self arg:lower arg:upper Assign Assign FunctionDef name:__lt__ arguments arg:self arg:other_node Return return:yes" - }, - { - "library": "pytorch", - "name": "synchronize", - "source_code": "def synchronize(device: _device_t = None) -> None: pass", - "docstring": "Waits for all kernels in all streams on the CPU device to complete. Args: device (torch.device or int, optional): ignored, there's only one CPU device. N.B. This function only exists to facilitate device-agnostic code.", - "type": "function", - "file_path": "pytorch\\torch\\cpu\\__init__.py", - "ast_data": "FunctionDef name:synchronize arguments arg:device type:_device_t" - }, - { - "library": "scipy", - "name": "Rosenbrock", - "source_code": "class Rosenbrock(Benchmark): change_dimensionality = True def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-30.0] * self.N, [30.0] * self.N)) self.custom_bounds = [(-2, 2), (-2, 2)] self.global_optimum = [[1 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return rosen(x)", - "docstring": "Rosenbrock objective function. This class defines the Rosenbrock [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Rosenbrock}}(x) = \\sum_{i=1}^{n-1} [100(x_i^2 - x_{i+1})^2 + (x_i - 1)^2] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_R.py", - "ast_data": "ClassDef name:Rosenbrock Assign FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "scipy", - "name": "lagrangian_hessian_x", - "source_code": "def lagrangian_hessian_x(self, z, v): x = self.get_variables(z) v_eq = v[: self.n_eq] v_ineq = v[self.n_eq: self.n_eq + self.n_ineq] lagr_hess = self.lagr_hess return lagr_hess(x, v_eq, v_ineq)", - "docstring": "Returns Lagrangian Hessian (in relation to ) -> Hx", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\tr_interior_point.py", - "ast_data": "FunctionDef name:lagrangian_hessian_x arguments arg:self arg:z arg:v Assign Call call:get_variables Assign Assign Assign Return return:yes" - }, - { - "library": "pandas", - "name": "duplicated", - "source_code": "def duplicated(values: ArrayLike, keep: Literal['first', 'last', False] = 'first', mask: npt.NDArray[np.bool_] | None = None) -> npt.NDArray[np.bool_]: values = _ensure_data(values) return htable.duplicated(values, keep = keep, mask = mask)", - "docstring": "Return boolean ndarray denoting duplicate values. Parameters ---------- values : np.ndarray or ExtensionArray Array over which to check for duplicate values. keep : {'first', 'last', False}, default 'first' - ``. mask : ndarray[bool], optional array indicating which elements to exclude from checking Returns ------- duplicated : ndarray[bool]", - "type": "function", - "file_path": "pandas\\pandas\\core\\algorithms.py", - "ast_data": "FunctionDef name:duplicated arguments arg:values type:ArrayLike arg:keep type:Literal['first', 'last', False] arg:mask type:npt.NDArray[np.bool_] | None Assign Call call:_ensure_data Return return:yes" - }, - { - "library": "pandas", - "name": "__array__", - "source_code": "def __array__(self, dtype: NpDtype | None = None, copy: bool | None = None) -> np.ndarray: if copy is False: if not self._hasna: return np.array(self._data, dtype = dtype, copy = copy) raise ValueError('Unable to avoid copy while creating an array as requested.') if copy is None: copy = False return self.to_numpy(dtype = dtype, copy = copy)", - "docstring": "the array interface, return my values We return an object array here to preserve our scalar values", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\masked.py", - "ast_data": "FunctionDef name:__array__ arguments arg:self arg:dtype type:NpDtype | None arg:copy type:bool | None If Compare op:Is If Return return:yes Raise raises:ValueError('Unable to avoid copy while creating an array as requested.') If Compare op:Is Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "inplace_row_scale", - "source_code": "def inplace_row_scale(X, scale): if sp.issparse(X) and X.format = = 'csc': inplace_csr_column_scale(X.T, scale) elif sp.issparse(X) and X.format = = 'csr': inplace_csr_row_scale(X, scale) else: _raise_typeerror(X)", - "docstring": "Inplace row scaling of a CSR or CSC matrix. Scale each row of the data matrix by multiplying with specific scale provided by the caller assuming a (n_samples, n_features) shape. Parameters ---------- X : sparse matrix of shape (n_samples, n_features) Matrix to be scaled. It should be of CSR or CSC format. 
scale : ndarray of shape (n_features,), dtype={np.float32, np.float64} Array of precomputed sample-wise values to use for scaling. Examples -------- >>> from sklearn.utils import sparsefuncs >>> from scipy import sparse >>> import numpy as np >>> indptr = np.array([0, 2, 3, 4, 5]) >>> indices = np.array([0, 1, 2, 3, 3]) >>> data = np.array([8, 1, 2, 5, 6]) >>> scale = np.array([2, 3, 4, 5]) >>> csr = sparse.csr_matrix((data, indices, indptr)) >>> csr.todense() matrix([[8, 1, 0, 0], [0, 0, 2, 0], [0, 0, 0, 5], [0, 0, 0, 6]]) >>> sparsefuncs.inplace_row_scale(csr, scale) >>> csr.todense() matrix([[16, 2, 0, 0], [ 0, 0, 6, 0], [ 0, 0, 0, 20], [ 0, 0, 0, 30]])", - "type": "function", - "file_path": "scikit-learn\\sklearn\\utils\\sparsefuncs.py", - "ast_data": "FunctionDef name:inplace_row_scale arguments arg:X arg:scale If BoolOp Call call:issparse Compare op:Eq If BoolOp Call call:issparse Compare op:Eq" - }, - { - "library": "pytorch", - "name": "broadcast", - "source_code": "def broadcast(self: torch.Tensor, src: int, group: RANK_TYPES, tag: str = ''): group_name = _resolve_group_name(group, tag) tensor = torch.ops._c10d_functional.broadcast(self, src, group_name) return _maybe_wrap_tensor(tensor)", - "docstring": "Broadcasts the tensor to all processes in the given process group. Args: src (int): Source rank group (ProcessGroup or List[int]): The process group to work on. tag (str, optional): A unique identifier for the collective. Default: empty string", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", - "ast_data": "FunctionDef name:broadcast arguments arg:self type:torch.Tensor arg:src type:int arg:group type:RANK_TYPES arg:tag type:str Assign Call call:_resolve_group_name Assign Call call:broadcast Return return:yes" - }, - { - "library": "mongo", - "name": "to_list", - "source_code": "@_csot.apply def to_list(self, length: Optional[int] = None) -> list[_DocumentType]: res: list[_DocumentType] = [] remaining = length if isinstance(length, int) and length < 1: raise ValueError('to_list() length must be greater than 0') while self.alive: if not self._next_batch(res, remaining): break if length is not None: remaining = length - len(res) if remaining = = 0: break return res", - "docstring": "Converts the contents of this cursor to a list more efficiently than ``. To use:: >>> cursor.to_list() Or, so read at most n items from the cursor:: >>> cursor.to_list(n) If the cursor is empty or has no more results, an empty list will be returned. .. 
versionadded:: 4.9", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\command_cursor.py", - "ast_data": "FunctionDef name:to_list arguments arg:self arg:length type:Optional[int] Assign If BoolOp Call call:isinstance Compare op:Lt Raise raises:ValueError('to_list() length must be greater than 0') While If If Compare op:IsNot Assign If Compare op:Eq Return return:yes" - }, - { - "library": "tensorflow", - "name": "outer_graph", - "source_code": "@property def outer_graph(self): return self._outer_graph", - "docstring": "The graph active when this _FuncGraph was created.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py", - "ast_data": "FunctionDef name:outer_graph arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "find_csv_files", - "source_code": "def find_csv_files(path, perf_compare): def is_csv(f): if perf_compare: regex = 'training_(torchbench|huggingface|timm_models)\\\\.csv' return re.match(regex, f) is not None else: return f.endswith('_performance.csv') csv_files = [] for root, dirs, files in os.walk(path): for file in files: if is_csv(file): csv_files.append(os.path.join(root, file)) return csv_files", - "docstring": "Recursively search for all CSV files in directory and subdirectories whose name contains a target string.", - "type": "function", - "file_path": "pytorch\\benchmarks\\dynamo\\summarize_perf.py", - "ast_data": "FunctionDef name:find_csv_files arguments arg:path arg:perf_compare FunctionDef name:is_csv arguments arg:f If Assign Return return:yes Return return:yes Assign For Call call:walk For If Call call:is_csv Return return:yes" - }, - { - "library": "django", - "name": "__init__", - "source_code": "def __init__(self, subject = '', body = '', from_email = None, to = None, bcc = None, connection = None, attachments = None, headers = None, cc = None, reply_to = None): if to: if isinstance(to, str): raise TypeError('\"to\" argument must be a list or tuple') self.to = list(to) else: self.to = [] if cc: if isinstance(cc, str): raise TypeError('\"cc\" argument must be a list or tuple') self.cc = list(cc) else: self.cc = [] if bcc: if isinstance(bcc, str): raise TypeError('\"bcc\" argument must be a list or tuple') self.bcc = list(bcc) else: self.bcc = [] if reply_to: if isinstance(reply_to, str): raise TypeError('\"reply_to\" argument must be a list or tuple') self.reply_to = list(reply_to) else: self.reply_to = [] self.from_email = from_email or settings.DEFAULT_FROM_EMAIL self.subject = subject self.body = body or '' self.attachments = [] if attachments: for attachment in attachments: if isinstance(attachment, MIMEBase): self.attach(attachment) else: self.attach(*attachment) self.extra_headers = headers or {} self.connection = connection", - "docstring": "Initialize a single email message (which can be sent to multiple recipients).", - "type": "method", - "file_path": "django\\django\\core\\mail\\message.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:subject arg:body arg:from_email arg:to arg:bcc arg:connection arg:attachments arg:headers arg:cc arg:reply_to If If Call call:isinstance Raise raises:TypeError('\"to\" argument must be a list or tuple') Assign Call call:list Assign If If Call call:isinstance Raise raises:TypeError('\"cc\" argument must be a list or tuple') Assign Call call:list Assign If If Call call:isinstance Raise raises:TypeError('\"bcc\" argument must be a list or tuple') Assign Call call:list Assign If If Call call:isinstance Raise 
raises:TypeError('\"reply_to\" argument must be a list or tuple') Assign Call call:list Assign Assign BoolOp Assign Assign BoolOp Assign If For If Call call:isinstance Assign BoolOp Assign" - }, - { - "library": "tensorflow", - "name": "get_link_flags", - "source_code": "@tf_export('sysconfig.get_link_flags') def get_link_flags(): is_mac = _platform.system() = = 'Darwin' ver = _VERSION.split('.')[0] flags = [] if not _MONOLITHIC_BUILD: flags.append('-L%s' % get_lib()) if is_mac: flags.append('-ltensorflow_framework.%s' % ver) else: flags.append('-l: libtensorflow_framework.so.%s' % ver) return flags", - "docstring": "Returns the linker flags for linking with TensorFlow. The returned list of arguments can be passed to the linker for linking against TensorFlow. The result is platform dependent. For example, on a typical Linux system with Python 3.7 the following command prints >>> print(tf.sysconfig.get_link_flags()) Returns: A list of strings for the linker flags.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py", - "ast_data": "FunctionDef name:get_link_flags arguments Call call:tf_export Assign Compare op:Eq Assign Assign If If Return return:yes" - }, - { - "library": "numpy", - "name": "find_duplicates", - "source_code": "@array_function_dispatch(_find_duplicates_dispatcher) def find_duplicates(a, key = None, ignoremask = True, return_index = False): a = np.asanyarray(a).ravel() fields = get_fieldstructure(a.dtype) base = a if key: for f in fields[key]: base = base[f] base = base[key] sortidx = base.argsort() sortedbase = base[sortidx] sorteddata = sortedbase.filled() flag = sorteddata[: -1] = = sorteddata[1:] if ignoremask: sortedmask = sortedbase.recordmask flag[sortedmask[1:]] = False flag = np.concatenate(([False], flag)) flag[: -1] = flag[: -1] + flag[1:] duplicates = a[sortidx][flag] if return_index: return (duplicates, sortidx[flag]) else: return duplicates", - "docstring": "Find the duplicates in a structured array along a given key Parameters ---------- a : array-like Input array key : {string, None}, optional Name of the fields along which to check the duplicates. If None, the search is performed by records ignoremask : {True, False}, optional Whether masked data should be discarded or considered as duplicates. return_index : {False, True}, optional Whether to return the indices of the duplicated values. Examples -------- >>> import numpy as np >>> from numpy.lib import recfunctions as rfn >>> ndtype = [('a', int)] >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3], ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype) >>> rfn.find_duplicates(a, ignoremask=True, return_index=True) (masked_array(data=[(1,), (1,), (2,), (2,)], mask=[(False,), (False,), (False,), (False,)], fill_value=(999999,), dtype=[('a', ' 0, axis = 0) return self.classes_.take(np.argmax(pred, axis = 1), axis = 0)", - "docstring": "Predict classes for X. The predicted class of an input sample is computed as the weighted mean prediction of the classifiers in the ensemble. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The training input samples. Sparse matrix can be CSC, CSR, COO, DOK, or LIL. COO, DOK, and LIL are converted to CSR. 
Returns ------- y : ndarray of shape (n_samples,) The predicted classes.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X Assign Call call:decision_function If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "learn_cache_key", - "source_code": "def learn_cache_key(request, response, cache_timeout = None, key_prefix = None, cache = None): if key_prefix is None: key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX if cache_timeout is None: cache_timeout = settings.CACHE_MIDDLEWARE_SECONDS cache_key = _generate_cache_header_key(key_prefix, request) if cache is None: cache = caches[settings.CACHE_MIDDLEWARE_ALIAS] if response.has_header('Vary'): is_accept_language_redundant = settings.USE_I18N headerlist = [] for header in cc_delim_re.split(response.headers['Vary']): header = header.upper().replace('-', '_') if header ! = 'ACCEPT_LANGUAGE' or not is_accept_language_redundant: headerlist.append('HTTP_' + header) headerlist.sort() cache.set(cache_key, headerlist, cache_timeout) return _generate_cache_key(request, request.method, headerlist, key_prefix) else: cache.set(cache_key, [], cache_timeout) return _generate_cache_key(request, request.method, [], key_prefix)", - "docstring": "Learn what headers to take into account for some request URL from the response object. Store those headers in a global URL registry so that later access to that URL will know what headers to take into account without building the response object itself. The headers are named in the Vary header of the response, but we want to prevent response generation. The list of headers to use for cache key generation is stored in the same cache as the pages themselves. If the cache ages some data out of the cache, this just means that we have to build the response once to get at the Vary header and so at the list of headers to use for the cache key.", - "type": "function", - "file_path": "django\\django\\utils\\cache.py", - "ast_data": "FunctionDef name:learn_cache_key arguments arg:request arg:response arg:cache_timeout arg:key_prefix arg:cache If Compare op:Is Assign If Compare op:Is Assign Assign Call call:_generate_cache_header_key If Compare op:Is Assign If Call call:has_header Assign Assign For Call call:split Assign Call call:replace If BoolOp Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "convert_cmake_value_to_python_value", - "source_code": "def convert_cmake_value_to_python_value(cmake_value: str, cmake_type: str) -> CMakeValue: cmake_type = cmake_type.upper() up_val = cmake_value.upper() if cmake_type = = 'BOOL': return not (up_val in ('FALSE', 'OFF', 'N', 'NO', '0', '', 'NOTFOUND') or up_val.endswith('-NOTFOUND')) elif cmake_type = = 'FILEPATH': if up_val.endswith('-NOTFOUND'): return None else: return cmake_value else: return cmake_value", - "docstring": "Convert a CMake value in a string form to a Python value. Args: cmake_value (string): The CMake value in a string form (e.g., \"ON\", \"OFF\", \"1\"). cmake_type (string): The CMake type of :attr:. 
Returns: A Python value corresponding to :attr: with type :attr:.", - "type": "function", - "file_path": "pytorch\\tools\\setup_helpers\\cmake_utils.py", - "ast_data": "FunctionDef name:convert_cmake_value_to_python_value arguments arg:cmake_value type:str arg:cmake_type type:str Assign Call call:upper Assign Call call:upper If Compare op:Eq Return return:yes If Compare op:Eq If Call call:endswith Return return:yes Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_hook_stubs", - "source_code": "def get_hook_stubs(nn_module): check_module_initialized(nn_module) hook_map: dict = {} hook_stubs = [] for hook in nn_module._forward_hooks.values(): if hook.__name__ in hook_map: if id(hook) ! = id(hook_map[hook.__name__]): raise RuntimeError(f\"Hook '{hook.__name__}' on {type(nn_module).__name__} has at least two different python definitions. Please use unique names for all hooks.\") else: hook_map[hook.__name__] = hook hook_stubs.append(make_stub(hook, hook.__name__)) pre_hook_stubs = [] for pre_hook in nn_module._forward_pre_hooks.values(): if pre_hook.__name__ in hook_map: if id(pre_hook) ! = id(hook_map[pre_hook.__name__]): raise RuntimeError(f\"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} has at least two different python definitions. Please use unique names for all hooks.\") else: hook_map[pre_hook.__name__] = pre_hook pre_hook_stubs.append(make_stub(pre_hook, pre_hook.__name__)) return (hook_stubs, pre_hook_stubs)", - "docstring": "Return forward hook and pre_hook ScriptModuleStubs.", - "type": "function", - "file_path": "pytorch\\torch\\jit\\_recursive.py", - "ast_data": "FunctionDef name:get_hook_stubs arguments arg:nn_module Assign For Call call:values If Compare op:In If Compare op:NotEq Raise raises:RuntimeError(f\"Hook '{hook.__name__}' on {type(nn_module).__name__} has at least two different python definitions. Please use unique names for all hooks.\") Assign Assign For Call call:values If Compare op:In If Compare op:NotEq Raise raises:RuntimeError(f\"Pre-hook '{pre_hook.__name__}' on {type(nn_module).__name__} has at least two different python definitions. Please use unique names for all hooks.\") Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "find_guarded_entry", - "source_code": "@classmethod def find_guarded_entry(cls: type[GuardedCache[T]], key: str, local: bool, remote_cache: Optional[RemoteCache[JsonDataTy]], evaluate_guards: Callable[[str, Union[list[int], list[torch.SymInt]]], bool], hints: list[int]) -> tuple[Optional[T], Optional[bytes], dict[str, str]]: graph = None pickled_content = None result_status = 'full_miss' sample_guards_expr = None for candidate, content in cls.iterate_over_candidates(local, remote_cache, key): assert hasattr(candidate, 'guards_expr') if not candidate.guards_expr: graph = candidate pickled_content = content result_status = 'hit' break hit = bool(evaluate_guards(candidate.guards_expr, hints)) if hit: graph = candidate pickled_content = content result_status = 'hit' sample_guards_expr = candidate.guards_expr break else: result_status = 'guard_miss' sample_guards_expr = candidate.guards_expr info = {'cache_status_detailed': result_status} if sample_guards_expr is not None: info['cache_status_guard_expr'] = sample_guards_expr return (graph, pickled_content, info)", - "docstring": "Find the first cache entry in iterate_over_candidates that passes . 
Args: key: The cache key to look up local: Whether to check the local cache remote_cache: The remote cache to check, if any evaluate_guards: Function that evaluates whether a guard passes the check, given a list of hint values and the guard expression. hints: List of symint hints paired with evaluate_guards Returns: A tuple of (graph, pickled_content) if found, or (None, None) if not found", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codecache.py", - "ast_data": "FunctionDef name:find_guarded_entry arguments arg:cls type:type[GuardedCache[T]] arg:key type:str arg:local type:bool arg:remote_cache type:Optional[RemoteCache[JsonDataTy]] arg:evaluate_guards type:Callable[[str, Union[list[int], list[torch.SymInt]]], bool] arg:hints type:list[int] Assign Assign Assign Assign For Call call:iterate_over_candidates If Assign Assign Assign Assign Call call:bool If Assign Assign Assign Assign Assign Assign Assign If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "outbound_nodes", - "source_code": "@property @doc_controls.do_not_doc_inheritable def outbound_nodes(self): return self._outbound_nodes", - "docstring": "Deprecated, do NOT use! Only for compatibility with external Keras.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:outbound_nodes arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "detect", - "source_code": "@torch.inference_mode() def detect(self, images: Tensor, n: Optional[int] = 10000, apply_imagenet_normalization: bool = True, pad_if_not_divisible: bool = True, crop_h: Optional[int] = None, crop_w: Optional[int] = None) -> Tuple[Tensor, Tensor]: KORNIA_CHECK_SHAPE(images, ['B', '3', 'H', 'W']) self.train(False) B, C, H, W = images.shape if pad_if_not_divisible: h, w = images.shape[2:] pd_h = 14 - h % 14 if h % 14 > 0 else 0 pd_w = 14 - w % 14 if w % 14 > 0 else 0 images = torch.nn.functional.pad(images, (0, pd_w, 0, pd_h), value = 0.0) if apply_imagenet_normalization: images = self.normalizer(images) logits = self.detector.forward(images) logits = logits[..., : H, : W] if crop_h is not None and crop_w is not None: logits = logits[..., : crop_h, : crop_w] H, W = (crop_h, crop_w) scoremap = logits.reshape(B, H * W).softmax(dim = -1).reshape(B, H, W) keypoints, confidence = sample_keypoints(scoremap, num_samples = n) return (keypoints, confidence)", - "docstring": "Detect keypoints in the input images. Args: images: A tensor of shape :math: containing the input images. n: The number of keypoints to detect. apply_imagenet_normalization: Whether to apply ImageNet normalization to the input images. pad_if_not_divisible: pad image shape if not evenly divisible. crop_h: The height of the crop to be used for detection. If None, the full image is used. crop_w: The width of the crop to be used for detection. If None, the full image is used. Returns: keypoints: A tensor of shape :math: containing the detected keypoints, normalized to the range :math:. 
scores: A tensor of shape :math: containing the scores of the detected keypoints.", - "type": "method", - "file_path": "kornia\\kornia\\feature\\dedode\\dedode.py", - "ast_data": "FunctionDef name:detect arguments arg:self arg:images type:Tensor arg:n type:Optional[int] arg:apply_imagenet_normalization type:bool arg:pad_if_not_divisible type:bool arg:crop_h type:Optional[int] arg:crop_w type:Optional[int] Call call:inference_mode Assign If Assign Assign Assign Assign Call call:pad If Assign Call call:normalizer Assign Call call:forward Assign If BoolOp Compare op:IsNot Compare op:IsNot Assign Assign Assign Call call:reshape Assign Call call:sample_keypoints Return return:yes" - }, - { - "library": "pandas", - "name": "to_clipboard", - "source_code": "def to_clipboard(obj, excel: bool | None = True, sep: str | None = None, **kwargs) -> None: encoding = kwargs.pop('encoding', 'utf-8') if encoding is not None and encoding.lower().replace('-', '') ! = 'utf8': raise ValueError('clipboard only supports utf-8 encoding') from pandas.io.clipboard import clipboard_set if excel is None: excel = True if excel: try: if sep is None: sep = '\\t' buf = StringIO() obj.to_csv(buf, sep = sep, encoding = 'utf-8', **kwargs) text = buf.getvalue() clipboard_set(text) return except TypeError: warnings.warn('to_clipboard in excel mode requires a single character separator.', stacklevel = find_stack_level()) elif sep is not None: warnings.warn('to_clipboard with excel = False ignores the sep argument.', stacklevel = find_stack_level()) if isinstance(obj, ABCDataFrame): with option_context('display.max_colwidth', None): objstr = obj.to_string(**kwargs) else: objstr = str(obj) clipboard_set(objstr)", - "docstring": "Attempt to write text representation of object to the system clipboard The clipboard can be then pasted into Excel for example. Parameters ---------- obj : the object to write to the clipboard excel : bool, defaults to True if True, use the provided separator, writing in a csv format for allowing easy pasting into excel. if False, write a string representation of the object to the clipboard sep : optional, defaults to tab other keywords are passed to to_csv Notes ----- Requirements for your platform - Linux: xclip, or xsel (with PyQt4 modules) - Windows: - OS X:", - "type": "function", - "file_path": "pandas\\pandas\\io\\clipboards.py", - "ast_data": "FunctionDef name:to_clipboard arguments arg:obj arg:excel type:bool | None arg:sep type:str | None kwarg:kwargs Assign Call call:pop If BoolOp Compare op:IsNot Compare op:NotEq Raise raises:ValueError('clipboard only supports utf-8 encoding') If Compare op:Is Assign If Try If Compare op:Is Assign Assign Call call:StringIO Assign Call call:getvalue Return return:no ExceptHandler If Compare op:IsNot If Call call:isinstance With Assign Call call:to_string Assign Call call:str" - }, - { - "library": "scrapy", - "name": "load", - "source_code": "def load(self, spider_name: str) -> type[Spider]: try: return self._spiders[spider_name] except KeyError: raise KeyError(f'Spider not found: {spider_name}')", - "docstring": "Return the Spider class for the given spider name. 
If the spider name is not found, raise a KeyError.", - "type": "method", - "file_path": "scrapy\\scrapy\\spiderloader.py", - "ast_data": "FunctionDef name:load arguments arg:self arg:spider_name type:str Try Return return:yes ExceptHandler Raise raises:KeyError(f'Spider not found: {spider_name}')" - }, - { - "library": "django", - "name": "get_object_or_404", - "source_code": "def get_object_or_404(klass, *args, **kwargs): queryset = _get_queryset(klass) if not hasattr(queryset, 'get'): klass__name = klass.__name__ if isinstance(klass, type) else klass.__class__.__name__ raise ValueError(\"First argument to get_object_or_404() must be a Model, Manager, or QuerySet, not '%s'.\" % klass__name) try: return queryset.get(*args, **kwargs) except queryset.model.DoesNotExist: raise Http404('No %s matches the given query.' % queryset.model._meta.object_name)", - "docstring": "Use get() to return an object, or raise an Http404 exception if the object does not exist. klass may be a Model, Manager, or QuerySet object. All other passed arguments and keyword arguments are used in the get() query. Like with QuerySet.get(), MultipleObjectsReturned is raised if more than one object is found.", - "type": "function", - "file_path": "django\\django\\shortcuts.py", - "ast_data": "FunctionDef name:get_object_or_404 arguments arg:klass vararg:args kwarg:kwargs Assign Call call:_get_queryset If Assign Raise raises:ValueError(\"First argument to get_object_or_404() must be a Model, Manager, or QuerySet, not '%s'.\" % klass__name) Try Return return:yes ExceptHandler Raise raises:Http404('No %s matches the given query.' % queryset.model._meta.object_name)" - }, - { - "library": "tensorflow", - "name": "cleanup_makefile", - "source_code": "def cleanup_makefile(): makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow', 'contrib', 'makefile', 'downloads') if os.path.isdir(makefile_download_dir): for root, _, filenames in os.walk(makefile_download_dir): for f in filenames: if f.endswith('BUILD'): os.remove(os.path.join(root, f))", - "docstring": "Delete any leftover BUILD files from the Makefile build. These files could interfere with Bazel parsing.", - "type": "function", - "file_path": "tensorflow\\configure.py", - "ast_data": "FunctionDef name:cleanup_makefile arguments Assign Call call:join If Call call:isdir For Call call:walk For If Call call:endswith" - }, - { - "library": "django", - "name": "get_version_tuple", - "source_code": "def get_version_tuple(version): version_numbers = [] for item in version_component_re.split(version): if item and item ! = '.': try: component = int(item) except ValueError: break else: version_numbers.append(component) return tuple(version_numbers)", - "docstring": "Return a tuple of version numbers (e.g. (1, 2, 3)) from the version string (e.g. '1.2.3').", - "type": "function", - "file_path": "django\\django\\utils\\version.py", - "ast_data": "FunctionDef name:get_version_tuple arguments arg:version Assign For Call call:split If BoolOp Compare op:NotEq Try Assign Call call:int ExceptHandler Return return:yes" - }, - { - "library": "django", - "name": "JSONCatalog", - "source_code": "class JSONCatalog(JavaScriptCatalog): def render_to_response(self, context, **response_kwargs): return JsonResponse(context)", - "docstring": "Return the selected language catalog as a JSON object. 
Receive the same parameters as JavaScriptCatalog and return a response with a JSON object of the following format: { \"catalog\": { # Translations catalog }, \"formats\": { # Language formats for date, time, etc. }, \"plural\": '...' # Expression for plural forms, or null. }", - "type": "class", - "file_path": "django\\django\\views\\i18n.py", - "ast_data": "ClassDef name:JSONCatalog FunctionDef name:render_to_response arguments arg:self arg:context kwarg:response_kwargs Return return:yes" - }, - { - "library": "pytorch", - "name": "RuntimeSchemaInfo", - "source_code": "@dataclass class RuntimeSchemaInfo: static_argnum: int = 100 static_kwargkey: Optional[list[str]] = None needs_pytree: bool = False", - "docstring": "RuntimeSchemaInfo stores the operator schema related information for runtime (eager) execution. This is mainly used for two ways: 1. to generate hash for args to determine whether to re-run sharding prop or not 2. to determine if we need pytree", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\tensor\\_op_schema.py", - "ast_data": "ClassDef name:RuntimeSchemaInfo" - }, - { - "library": "scikit-learn", - "name": "inverse_transform", - "source_code": "def inverse_transform(self, X): check_is_fitted(self) if 'onehot' in self.encode: X = self._encoder.inverse_transform(X) Xinv = check_array(X, copy = True, dtype = (np.float64, np.float32)) n_features = self.n_bins_.shape[0] if Xinv.shape[1] ! = n_features: raise ValueError('Incorrect number of features. Expecting {}, received {}.'.format(n_features, Xinv.shape[1])) for jj in range(n_features): bin_edges = self.bin_edges_[jj] bin_centers = (bin_edges[1:] + bin_edges[: -1]) * 0.5 Xinv[:, jj] = bin_centers[Xinv[:, jj].astype(np.int64)] return Xinv", - "docstring": "Transform discretized data back to original feature space. Note that this function does not regenerate the original data due to discretization rounding. Parameters ---------- X : array-like of shape (n_samples, n_features) Transformed data in the binned space. Returns ------- X_original : ndarray, dtype={np.float32, np.float64} Data in the original feature space.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\preprocessing\\_discretization.py", - "ast_data": "FunctionDef name:inverse_transform arguments arg:self arg:X If Compare op:In Assign Call call:inverse_transform Assign Call call:check_array Assign If Compare op:NotEq Raise raises:ValueError('Incorrect number of features. Expecting {}, received {}.'.format(n_features, Xinv.shape[1])) For Call call:range Assign Assign Assign Return return:yes" - }, - { - "library": "numpy", - "name": "imag", - "source_code": "@array_function_dispatch(_imag_dispatcher) def imag(val): try: return val.imag except AttributeError: return asanyarray(val).imag", - "docstring": "Return the imaginary part of the complex argument. Parameters ---------- val : array_like Input array. Returns ------- out : ndarray or scalar The imaginary component of the complex argument. If is real, the type of is used for the output. If has complex elements, the returned type is float. See Also -------- real, angle, real_if_close Examples -------- >>> import numpy as np >>> a = np.array([1+2j, 3+4j, 5+6j]) >>> a.imag array([2., 4., 6.]) >>> a.imag = np.array([8, 10, 12]) >>> a array([1. 
+8.j, 3.+10.j, 5.+12.j]) >>> np.imag(1 + 1j) 1.0", - "type": "function", - "file_path": "numpy\\numpy\\lib\\_type_check_impl.py", - "ast_data": "FunctionDef name:imag arguments arg:val Call call:array_function_dispatch Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "tensorflow", - "name": "copy_file", - "source_code": "def copy_file(src_file: str, dst_dir: str, strip: str = None, dest_file: str = None) -> None: dest = dest_file if dest_file else src_file if dest.startswith('bazel-out'): dest = dest[dest.index('bin') + 4:] if strip: dest = dest.removeprefix(strip) dest_dir_path = os.path.join(dst_dir, os.path.dirname(dest)) os.makedirs(dest_dir_path, exist_ok = True) shutil.copy(src_file, dest_dir_path) os.chmod(os.path.join(dst_dir, dest), 420)", - "docstring": "Copy a file to the destination directory. Args: src_file: file to be copied dst_dir: destination directory strip: prefix to strip before copying to destination dest_file: destanation file location if different from src_file", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\utils\\utils.py", - "ast_data": "FunctionDef name:copy_file arguments arg:src_file type:str arg:dst_dir type:str arg:strip type:str arg:dest_file type:str Assign If Call call:startswith Assign If Assign Call call:removeprefix Assign Call call:join" - }, - { - "library": "matplotlib", - "name": "get_index_label_pos", - "source_code": "def get_index_label_pos(index, extent, origin, inverted_xindex): if extent is None: extent = lookup_extent(origin) left, right, bottom, top = extent x, y = index_to_coordinate(index, extent, origin) is_x0 = index[-2:] = = '0]' halign = 'left' if is_x0 ^ inverted_xindex else 'right' hshift = 0.5 * np.sign(left - right) x + = hshift * (1 if is_x0 else -1) return (x, y, halign)", - "docstring": "Return the desired position and horizontal alignment of an index label.", - "type": "function", - "file_path": "matplotlib\\galleries\\users_explain\\artists\\imshow_extent.py", - "ast_data": "FunctionDef name:get_index_label_pos arguments arg:index arg:extent arg:origin arg:inverted_xindex If Compare op:Is Assign Call call:lookup_extent Assign Assign Call call:index_to_coordinate Assign Compare op:Eq Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_task_states", - "source_code": "def get_task_states(self, job_configs): if self._context_handle: job_names, task_nums = zip(*job_configs) return pywrap_tfe.TFE_GetTaskStates(self._context_handle, job_names, task_nums) else: raise ValueError('Context is not initialized.')", - "docstring": "Get task states from the Coordination Service. Args: job_configs: A list of tuples of job name and task number. 
Returns: A list of TF_Status.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:get_task_states arguments arg:self arg:job_configs If Assign Call call:zip Return return:yes Raise raises:ValueError('Context is not initialized.')" - }, - { - "library": "pytorch", - "name": "codegen_body", - "source_code": "def codegen_body(self) -> None: if self.multistage_reduction_entry: with self.body.indent(): self.body.splice(self.loads) self.body.splice(self.compute) self.body.writeline('}') self.cse.invalidate(OrderedSet(self.cse.reduction_cache.values())) self.multistage_reduction_entry.cache_clear() self.multistage_reduction_entry = None else: self.body.splice(self.loads) self.body.splice(self.compute) self.body.splice(self.stores) self.loads.clear() self.compute.clear() self.stores.clear()", - "docstring": "Concat output code from index_code, loads, compute, stores, suffix into self.body. For pointwise kernels, this is called just once at the end. For reduction kernels, this generates a loop over the reduction axis.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\mps.py", - "ast_data": "FunctionDef name:codegen_body arguments arg:self If With Assign" - }, - { - "library": "scipy", - "name": "scoreatpercentile", - "source_code": "def scoreatpercentile(data, per, limit = (), alphap = 0.4, betap = 0.4): if per < 0 or per > 100.0: raise ValueError(f'The percentile should be between 0. and 100. ! (got {per})') return mquantiles(data, prob = [per / 100.0], alphap = alphap, betap = betap, limit = limit, axis = 0).squeeze()", - "docstring": "Calculate the score at the given 'per' percentile of the sequence a. For example, the score at per=50 is the median. This function is a shortcut to mquantile", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", - "ast_data": "FunctionDef name:scoreatpercentile arguments arg:data arg:per arg:limit arg:alphap arg:betap If BoolOp Compare op:Lt Compare op:Gt Raise raises:ValueError(f'The percentile should be between 0. and 100. ! (got {per})') Return return:yes" - }, - { - "library": "pytorch", - "name": "change_current_allocator", - "source_code": "def change_current_allocator(allocator: _CUDAAllocator) -> None: torch._C._cuda_changeCurrentAllocator(allocator.allocator())", - "docstring": "Change the currently used memory allocator to be the one provided. If the current allocator has already been used/initialized, this function will error. Args: allocator (torch.cuda.memory._CUDAAllocator): allocator to be set as the active one. .. 
note:: See :ref: for details on creating and using a custom allocator", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\memory.py", - "ast_data": "FunctionDef name:change_current_allocator arguments arg:allocator type:_CUDAAllocator" - }, - { - "library": "authlib", - "name": "parse_id_token", - "source_code": "async def parse_id_token(self, token, nonce, claims_options = None, claims_cls = None, leeway = 120): claims_params = dict(nonce = nonce, client_id = self.client_id) if claims_cls is None: if 'access_token' in token: claims_params['access_token'] = token['access_token'] claims_cls = CodeIDToken else: claims_cls = ImplicitIDToken metadata = await self.load_server_metadata() if claims_options is None and 'issuer' in metadata: claims_options = {'iss': {'values': [metadata['issuer']]}} alg_values = metadata.get('id_token_signing_alg_values_supported') if not alg_values: alg_values = ['RS256'] jwt = JsonWebToken(alg_values) jwk_set = await self.fetch_jwk_set() try: claims = jwt.decode(token['id_token'], key = JsonWebKey.import_key_set(jwk_set), claims_cls = claims_cls, claims_options = claims_options, claims_params = claims_params) except ValueError: jwk_set = await self.fetch_jwk_set(force = True) claims = jwt.decode(token['id_token'], key = JsonWebKey.import_key_set(jwk_set), claims_cls = claims_cls, claims_options = claims_options, claims_params = claims_params) if claims.get('nonce_supported') is False: claims.params['nonce'] = None claims.validate(leeway = leeway) return UserInfo(claims)", - "docstring": "Return an instance of UserInfo from token's ``.", - "type": "method", - "file_path": "authlib\\authlib\\integrations\\base_client\\async_openid.py", - "ast_data": "AsyncFunctionDef name:parse_id_token arguments arg:self arg:token arg:nonce arg:claims_options arg:claims_cls arg:leeway Assign Call call:dict If Compare op:Is If Compare op:In Assign Assign Assign Assign If BoolOp Compare op:Is Compare op:In Assign Assign Call call:get If Assign Assign Call call:JsonWebToken Assign Try Assign Call call:decode ExceptHandler Assign Assign Call call:decode If Compare op:Is Assign Return return:yes" - }, - { - "library": "django", - "name": "unique_kwargs", - "source_code": "def unique_kwargs(self, kwargs): if isinstance(self.unique, str): return {self.unique: kwargs[self.unique]} else: return {fld: kwargs[fld] for fld in self.unique}", - "docstring": "Given the feature keyword arguments (from ), construct and return the uniqueness keyword arguments -- a subset of the feature kwargs.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\utils\\layermapping.py", - "ast_data": "FunctionDef name:unique_kwargs arguments arg:self arg:kwargs If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "tf2sos", - "source_code": "def tf2sos(b, a, pairing = None, *, analog = False): return zpk2sos(*tf2zpk(b, a), pairing = pairing, analog = analog)", - "docstring": "Return second-order sections from transfer function representation Parameters ---------- b : array_like Numerator polynomial coefficients. a : array_like Denominator polynomial coefficients. pairing : {None, 'nearest', 'keep_odd', 'minimal'}, optional The method to use to combine pairs of poles and zeros into sections. See for information and restrictions on and arguments. analog : bool, optional If True, system is analog, otherwise discrete. .. 
versionadded:: 1.8.0 Returns ------- sos : ndarray Array of second-order filter coefficients, with shape `sosfilt` for the SOS filter format specification. See Also -------- zpk2sos, sosfilt Notes ----- It is generally discouraged to convert from TF to SOS format, since doing so usually will not improve numerical precision errors. Instead, consider designing filters in ZPK format and converting directly to SOS. TF is converted to SOS by first converting to ZPK format, then converting ZPK to SOS. .. versionadded:: 0.16.0 Examples -------- Find the 'sos' (second-order sections) of the transfer function H(s) using its polynomial representation. .. math:: H(s) = \\frac{s^2 - 3.5s - 2}{s^4 + 3s^3 - 15s^2 - 19s + 30} >>> from scipy.signal import tf2sos >>> tf2sos([1, -3.5, -2], [1, 3, -15, -19, 30], analog=True) array([[ 0. , 0. , 1. , 1. , 2. , -15. ], [ 1. , -3.5, -2. , 1. , 1. , -2. ]])", - "type": "function", - "file_path": "scipy\\scipy\\signal\\_filter_design.py", - "ast_data": "FunctionDef name:tf2sos arguments arg:b arg:a arg:pairing Return return:yes" - }, - { - "library": "pytorch", - "name": "count_prefix", - "source_code": "def count_prefix(self, prefix: str) -> int: return sum((1 for record in self.archive_file.get_all_written_records() if record.startswith(prefix)))", - "docstring": "Count the number of records that start with a given prefix.", - "type": "method", - "file_path": "pytorch\\torch\\export\\pt2_archive\\_package.py", - "ast_data": "FunctionDef name:count_prefix arguments arg:self arg:prefix type:str Return return:yes" - }, - { - "library": "sphinx", - "name": "IndexEntry", - "source_code": "class IndexEntry(NamedTuple): name: str subtype: int docname: str anchor: str extra: str qualifier: str descr: str", - "docstring": "An index entry. .. 
note:: The *qualifier* and *description* are not rendered for some output formats, such as LaTeX.", - "type": "class", - "file_path": "sphinx\\sphinx\\domains\\_index.py", - "ast_data": "ClassDef name:IndexEntry" - }, - { - "library": "tensorflow", - "name": "map_missing_dict_keys", - "source_code": "def map_missing_dict_keys(y_pred, struct): if not isinstance(y_pred, dict) or not isinstance(struct, dict): return struct for k in y_pred.keys(): if k not in struct: struct[k] = None return struct", - "docstring": "Replaces missing dict keys in with placeholders.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\compile_utils.py", - "ast_data": "FunctionDef name:map_missing_dict_keys arguments arg:y_pred arg:struct If BoolOp Return return:yes For Call call:keys If Compare op:NotIn Assign Return return:yes" - }, - { - "library": "uvicorn", - "name": "signal_handler", - "source_code": "def signal_handler(self, sig: int, frame: FrameType | None) -> None: if sys.platform = = 'win32' and self.is_restarting: self.is_restarting = False else: self.should_exit.set()", - "docstring": "A signal handler that is registered with the parent process.", - "type": "method", - "file_path": "uvicorn\\uvicorn\\supervisors\\basereload.py", - "ast_data": "FunctionDef name:signal_handler arguments arg:self arg:sig type:int arg:frame type:FrameType | None If BoolOp Compare op:Eq Assign" - }, - { - "library": "matplotlib", - "name": "add_child_axes", - "source_code": "def add_child_axes(self, ax): ax._axes = self ax.stale_callback = martist._stale_axes_callback self.child_axes.append(ax) ax._remove_method = functools.partial(self.get_figure(root = False)._remove_axes, owners = [self.child_axes]) self.stale = True return ax", - "docstring": "Add an to the Axes' children; return the child Axes. This is the lowlevel version. See .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:add_child_axes arguments arg:self arg:ax Assign Assign Assign Call call:partial Assign Return return:yes" - }, - { - "library": "mongo", - "name": "wait_queue_timeout", - "source_code": "@property def wait_queue_timeout(self) -> Optional[int]: return self.__wait_queue_timeout", - "docstring": "How long a thread will wait for a socket from the pool if the pool has no free sockets.", - "type": "method", - "file_path": "mongo\\pymongo\\pool_options.py", - "ast_data": "FunctionDef name:wait_queue_timeout arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "shapes", - "source_code": "@property def shapes(self): return self._shapes", - "docstring": "The list of shapes for each component of a staging area element.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", - "ast_data": "FunctionDef name:shapes arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "construct_array_type", - "source_code": "@classmethod def construct_array_type(cls) -> type[IntegerArray]: return IntegerArray", - "docstring": "Return the array type associated with this dtype. 
Returns ------- type", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\integer.py", - "ast_data": "FunctionDef name:construct_array_type arguments arg:cls Return return:yes" - }, - { - "library": "pytorch", - "name": "stream", - "source_code": "def stream(stream: Optional['torch.mtia.Stream']) -> StreamContext: return StreamContext(stream)", - "docstring": "Wrap around the Context-manager StreamContext that selects a given stream. Arguments: stream (Stream): selected stream. This manager is a no-op if it's ``. .. note:: In eager mode stream is of type Stream class while in JIT it doesn't support torch.mtia.stream", - "type": "function", - "file_path": "pytorch\\torch\\mtia\\__init__.py", - "ast_data": "FunctionDef name:stream arguments arg:stream type:Optional['torch.mtia.Stream'] Return return:yes" - }, - { - "library": "pygame", - "name": "set_instrument", - "source_code": "def set_instrument(self, instrument_id, channel = 0): if not 0 < = instrument_id < = 127: raise ValueError(f'Undefined instrument id: {instrument_id}') if not 0 < = channel < = 15: raise ValueError('Channel not between 0 and 15.') self.write_short(192 + channel, instrument_id)", - "docstring": "select an instrument for a channel, with a value between 0 and 127 Output.set_instrument(instrument_id, channel=0) Also called \"patch change\" or \"program change\".", - "type": "method", - "file_path": "pygame\\src_py\\midi.py", - "ast_data": "FunctionDef name:set_instrument arguments arg:self arg:instrument_id arg:channel If Raise raises:ValueError(f'Undefined instrument id: {instrument_id}') If Raise raises:ValueError('Channel not between 0 and 15.')" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, call_collection, call_fn, name, input_signature): self.call_collection = call_collection self.input_signature = input_signature self.wrapped_call = def_function.function(layer_call_wrapper(call_collection, call_fn, name), input_signature = input_signature) self.original_layer_call = call_collection.layer_call_method", - "docstring": "Initializes a LayerCall object. Args: call_collection: a LayerCallCollection, which contains the other layer call functions (e.g. call_with_conditional_losses, call). These functions should be traced with the same arguments. call_fn: A call function. name: Name of the call function. 
input_signature: Input signature of call_fn (can be None).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:call_collection arg:call_fn arg:name arg:input_signature Assign Assign Assign Call call:function Assign" - }, - { - "library": "tensorflow", - "name": "get_all_v2_names", - "source_code": "def get_all_v2_names(): v2_names = set() def visit(unused_path, unused_parent, children): for child in children: _, attr = tf_decorator.unwrap(child[1]) api_names_v2 = tf_export.get_v2_names(attr) for name in api_names_v2: v2_names.add(name) visitor = public_api.PublicAPIVisitor(visit) visitor.do_not_descend_map['tf'].append('contrib') visitor.private_map['tf.compat'] = ['v1', 'v2'] traverse.traverse(tf.compat.v2, visitor) return v2_names", - "docstring": "Get a set of function/class names available in TensorFlow 2.0.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\compatibility\\update\\generate_v2_renames_map.py", - "ast_data": "FunctionDef name:get_all_v2_names arguments Assign Call call:set FunctionDef name:visit arguments arg:unused_path arg:unused_parent arg:children For Assign Call call:unwrap Assign Call call:get_v2_names For Assign Call call:PublicAPIVisitor Assign Return return:yes" - }, - { - "library": "mongo", - "name": "__init__", - "source_code": "def __init__(self, client: Optional[MongoClient[_DocumentTypeArg]], key_vault_coll: Collection[_DocumentTypeArg], mongocryptd_client: Optional[MongoClient[_DocumentTypeArg]], opts: AutoEncryptionOpts): self.client_ref: Any if client is not None: self.client_ref = weakref.ref(client) else: self.client_ref = None self.key_vault_coll: Optional[Collection[RawBSONDocument]] = cast(Collection[RawBSONDocument], key_vault_coll.with_options(codec_options = _KEY_VAULT_OPTS, read_concern = ReadConcern(level = 'majority'), write_concern = WriteConcern(w = 'majority'))) self.mongocryptd_client = mongocryptd_client self.opts = opts self._spawned = False self._kms_ssl_contexts = opts._kms_ssl_contexts(_IS_SYNC)", - "docstring": "Internal class to perform I/O on behalf of pymongocrypt.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\encryption.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:client type:Optional[MongoClient[_DocumentTypeArg]] arg:key_vault_coll type:Collection[_DocumentTypeArg] arg:mongocryptd_client type:Optional[MongoClient[_DocumentTypeArg]] arg:opts type:AutoEncryptionOpts If Compare op:IsNot Assign Call call:ref Assign Assign Assign Assign Assign Call call:_kms_ssl_contexts" - }, - { - "library": "pytorch", - "name": "nodes_filter", - "source_code": "def nodes_filter(nodes: list[torch.fx.Node], node_call_back) -> list[torch.fx.Node]: return [node for node in nodes if node_call_back(node)]", - "docstring": "Returns the nodes that match the node_call_back as a list.", - "type": "function", - "file_path": "pytorch\\torch\\_export\\utils.py", - "ast_data": "FunctionDef name:nodes_filter arguments arg:nodes type:list[torch.fx.Node] arg:node_call_back Return return:yes" - }, - { - "library": "tensorflow", - "name": "write_object_proto", - "source_code": "def write_object_proto(var, proto, options): if options.experimental_variable_policy._expand_distributed_variables(): for var in var.values: var_proto = proto.variable.experimental_distributed_variable_components.add() var_proto.name = var.name.split(': ')[0] var_proto.device = var.device", - 
"docstring": "Update a SavedObject proto for the caller. If a DistributedVariable object supports this method, it will be called when saving with a pre-built proto representing the object, plus an instance of . This method is then free to modify that proto instance. with or synchronization optionally write out information about their components to the field of a (depending on the variable policy). Args: var: The DistributedVariable object. proto: A pre-built proto for this object. It is assumed this will be a instance. options: A instance.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py", - "ast_data": "FunctionDef name:write_object_proto arguments arg:var arg:proto arg:options If Call call:_expand_distributed_variables For Assign Call call:add Assign Assign" - }, - { - "library": "pandas", - "name": "right", - "source_code": "@cache_readonly def right(self) -> Index: return Index(self._data.right, copy = False)", - "docstring": "Return right bounds of the intervals in the IntervalIndex. The right bounds of each interval in the IntervalIndex are returned as an Index. The datatype of the right bounds is the same as the datatype of the endpoints of the intervals. Returns ------- Index An Index containing the right bounds of the intervals. See Also -------- IntervalIndex.left : Return the left bounds of the intervals in the IntervalIndex. IntervalIndex.mid : Return the mid-point of the intervals in the IntervalIndex. IntervalIndex.length : Return the length of the intervals in the IntervalIndex. Examples -------- >>> iv_idx = pd.IntervalIndex.from_arrays([1, 2, 3], [4, 5, 6], closed=\"right\") >>> iv_idx.right Index([4, 5, 6], dtype='int64') >>> iv_idx = pd.IntervalIndex.from_tuples( ... [(1, 4), (2, 5), (3, 6)], closed=\"left\" ... ) >>> iv_idx.right Index([4, 5, 6], dtype='int64')", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\interval.py", - "ast_data": "FunctionDef name:right arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "forward", - "source_code": "def forward(self, input: Tensor, output_size: Optional[list[int]] = None) -> Tensor: if self.padding_mode ! = 'zeros': raise ValueError('Only `zeros` padding mode is supported for ConvTranspose2d') assert isinstance(self.padding, tuple) num_spatial_dims = 2 output_padding = self._output_padding(input, output_size, self.stride, self.padding, self.kernel_size, num_spatial_dims, self.dilation) return F.conv_transpose2d(input, self.weight, self.bias, self.stride, self.padding, output_padding, self.groups, self.dilation)", - "docstring": "Performs the forward pass. Attributes: input (Tensor): The input tensor. output_size (list[int], optional): A list of integers representing the size of the output tensor. Default is None.", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\conv.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:input type:Tensor arg:output_size type:Optional[list[int]] If Compare op:NotEq Raise raises:ValueError('Only `zeros` padding mode is supported for ConvTranspose2d') Assign Assign Call call:_output_padding Return return:yes" - }, - { - "library": "tensorflow", - "name": "should_cast", - "source_code": "def should_cast(self, v): return self._dvariable.save_as_bf16 and v.dtype = = dtypes.float32", - "docstring": "Returns True if v has float32 dtype and is intructed to save as bf16. Args: v : The variable that determines whether to cast. 
Returns: True if current savable DVariable is instructed to save as bfloat16 and the variable has dtype float32.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\d_variable.py", - "ast_data": "FunctionDef name:should_cast arguments arg:self arg:v Return return:yes" - }, - { - "library": "authlib", - "name": "acquire", - "source_code": "@contextmanager def acquire(self, scopes = None): try: yield self.acquire_token(scopes) except OAuth2Error as error: self.raise_error_response(error)", - "docstring": "The with statement of ``. Instead of using a decorator, you can use a with statement instead:: @app.route(\"/api/user\") def user_api(): with require_oauth.acquire(\"profile\") as token: user = User.get(token.user_id) return jsonify(user.to_dict())", - "type": "method", - "file_path": "authlib\\authlib\\integrations\\flask_oauth2\\resource_protector.py", - "ast_data": "FunctionDef name:acquire arguments arg:self arg:scopes Try ExceptHandler" - }, - { - "library": "pytorch", - "name": "autoclose", - "source_code": "def autoclose(self): self.close_on_last_child = True if self.child_counter = = 0: self.close()", - "docstring": "Automatically close stream when all child streams are closed or if there are none.", - "type": "method", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\utils\\common.py", - "ast_data": "FunctionDef name:autoclose arguments arg:self Assign If Compare op:Eq" - }, - { - "library": "mongo", - "name": "ConnectionReadyEvent", - "source_code": "class ConnectionReadyEvent(_ConnectionDurationEvent): __slots__ = ()", - "docstring": "Published when a Connection has finished its setup, and is ready to use. :param address: The address (host, port) pair of the server this Connection is attempting to connect to. :param connection_id: The integer ID of the Connection in this Pool. .. versionadded:: 3.9", - "type": "class", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "ClassDef name:ConnectionReadyEvent Assign" - }, - { - "library": "pytorch", - "name": "select_model_mode_for_export", - "source_code": "@deprecated('Please set training mode before exporting the model', category = None) @contextlib.contextmanager def select_model_mode_for_export(model, mode: _C_onnx.TrainingMode): if not isinstance(mode, _C_onnx.TrainingMode): raise TypeError(f\"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'.\") originally_training: bool = False if hasattr(model, 'training'): originally_training = model.training if mode = = _C_onnx.TrainingMode.TRAINING or (mode = = _C_onnx.TrainingMode.PRESERVE and originally_training): GLOBALS.export_training = True if GLOBALS.export_onnx_opset_version < 12: warnings.warn(f'You are exporting the model in training mode with onnx opset version {GLOBALS.export_onnx_opset_version}. 
Opset versions lower than opset 12 will not be able to export nodes such as Dropout and BatchNorm correctly.') else: GLOBALS.export_training = False GLOBALS.training_mode = mode if mode = = _C_onnx.TrainingMode.TRAINING: model.train(True) elif mode = = _C_onnx.TrainingMode.EVAL: model.train(False) try: yield finally: if hasattr(model, 'training') and (not mode = = _C_onnx.TrainingMode.PRESERVE): model.train(originally_training)", - "docstring": "A context manager to temporarily set the training mode of `exportexport`.", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\utils.py", - "ast_data": "FunctionDef name:select_model_mode_for_export arguments arg:model arg:mode type:_C_onnx.TrainingMode Call call:deprecated If Raise raises:TypeError(f\"'mode' should be a torch.onnx.TrainingMode enum, but got '{type(mode)}'.\") If Call call:hasattr Assign If BoolOp Compare op:Eq BoolOp Compare op:Eq Assign If Compare op:Lt Assign Assign If Compare op:Eq If Compare op:Eq Try If BoolOp Call call:hasattr" - }, - { - "library": "numpy", - "name": "getmask", - "source_code": "def getmask(a): return getattr(a, '_mask', nomask)", - "docstring": "Return the mask of a masked array, or nomask. Return the mask of as an ndarray if is a and the mask is not , else return . To guarantee a full array of booleans of the same shape as a, use . Parameters ---------- a : array_like Input for which the mask is required. See Also -------- getdata : Return the data of a masked array as an ndarray. getmaskarray : Return the mask of a masked array, or full array of False. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.masked_equal([[1,2],[3,4]], 2) >>> a masked_array( data=[[1, --], [3, 4]], mask=[[False, True], [False, False]], fill_value=2) >>> ma.getmask(a) array([[False, True], [False, False]]) Equivalently use the attribute. >>> a.mask array([[False, True], [False, False]]) Result when mask == >>> b = ma.masked_array([[1,2],[3,4]]) >>> b masked_array( data=[[1, 2], [3, 4]], mask=False, fill_value=999999) >>> ma.nomask False >>> ma.getmask(b) == ma.nomask True >>> b.mask == ma.nomask True", - "type": "function", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:getmask arguments arg:a Return return:yes" - }, - { - "library": "pytorch", - "name": "fqn_to_module", - "source_code": "def fqn_to_module(model: Optional[nn.Module], path: str) -> Optional[nn.Module]: if path ! = '': for name in path.split('.'): model = getattr(model, name, None) return model", - "docstring": "Given an fqn, returns the corresponding module or tensor or None if the fqn given by doesn't correspond to anything. Similar to model.get_submodule(path) but works for tensors.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\pruning\\sparsifier\\utils.py", - "ast_data": "FunctionDef name:fqn_to_module arguments arg:model type:Optional[nn.Module] arg:path type:str If Compare op:NotEq For Call call:split Assign Call call:getattr Return return:yes" - }, - { - "library": "tensorflow", - "name": "squared_hinge", - "source_code": "@dispatch.add_dispatch_support def squared_hinge(y_true, y_pred): y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) y_true = _maybe_convert_labels(y_true) return backend.mean(math_ops.square(math_ops.maximum(1.0 - y_true * y_pred, 0.0)), axis = -1)", - "docstring": "Computes the squared hinge loss between and . 
Standalone usage: >>> y_true = np.random.choice([-1, 1], size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), ... np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1)) Args: y_true: The ground truth values. values are expected to be -1 or 1. If binary (0 or 1) labels are provided we will convert them to -1 or 1. shape = . y_pred: The predicted values. shape = . Returns: Squared hinge loss values. shape = .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", - "ast_data": "FunctionDef name:squared_hinge arguments arg:y_true arg:y_pred Assign Call call:convert_to_tensor_v2_with_dispatch Assign Call call:cast Assign Call call:_maybe_convert_labels Return return:yes" - }, - { - "library": "django", - "name": "parse_number", - "source_code": "@classmethod def parse_number(cls, name): if (squashed_match: = re.search('.*_squashed_(\\\\d+)', name)): return int(squashed_match[1]) match = re.match('^\\\\d+', name) if match: return int(match[0]) return None", - "docstring": "Given a migration name, try to extract a number from the beginning of it. For a squashed migration such as '0001_squashed_0004…', return the second number. If no number is found, return None.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\autodetector.py", - "ast_data": "FunctionDef name:parse_number arguments arg:cls arg:name If Return return:yes Assign Call call:match If Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "execute_sql", - "source_code": "def execute_sql(self, result_type): row_count = super().execute_sql(result_type) is_empty = row_count is None row_count = row_count or 0 for query in self.query.get_related_updates(): aux_row_count = query.get_compiler(self.using).execute_sql(result_type) if is_empty and aux_row_count: row_count = aux_row_count is_empty = False return row_count", - "docstring": "Execute the specified update. Return the number of rows affected by the primary update query. The \"primary update query\" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\compiler.py", - "ast_data": "FunctionDef name:execute_sql arguments arg:self arg:result_type Assign Call call:execute_sql Assign Compare op:Is Assign BoolOp For Call call:get_related_updates Assign Call call:execute_sql If BoolOp Assign Assign Return return:yes" - }, - { - "library": "pandas", - "name": "__getitem__", - "source_code": "def __getitem__(self, item: PositionalIndexer) -> Self | Any: raise AbstractMethodError(self)", - "docstring": "Select a subset of self. Parameters ---------- item : int, slice, or ndarray * int: The position in 'self' to get. 
* slice: A slice object, where 'start', 'stop', and 'step' are integers or None * ndarray: A 1-d boolean NumPy ndarray the same length as 'self' * list[int]: A list of int Returns ------- item : scalar or ExtensionArray Notes ----- For scalar `` is True.", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\base.py", - "ast_data": "FunctionDef name:__getitem__ arguments arg:self arg:item type:PositionalIndexer Raise raises:AbstractMethodError(self)" - }, - { - "library": "pytorch", - "name": "has_fake_kernel", - "source_code": "def has_fake_kernel(op: torch._ops.OpOverload) -> bool: if can_generate_trivial_fake_impl(op): return True name = op._name if torch._C._dispatch_has_kernel_for_dispatch_key(name, 'CompositeImplicitAutograd'): return True opdef = torch._library.custom_ops._maybe_get_opdef(name) if opdef is None: if torch._C._dispatch_has_kernel_for_dispatch_key(name, 'CompositeExplicitAutograd'): return True entry = torch._library.simple_registry.singleton.find(name) if entry.fake_impl.kernel is not None: return True if torch._C._dispatch_has_kernel_for_dispatch_key(name, 'Meta'): return True elif opdef._abstract_fn is not None: return True return False", - "docstring": "If an operator (that stays alive until FakeTensorMode) has a Fake kernel. Don't use this if the operator decomposes before FakeTensorMode.", - "type": "function", - "file_path": "pytorch\\torch\\_library\\utils.py", - "ast_data": "FunctionDef name:has_fake_kernel arguments arg:op type:torch._ops.OpOverload If Call call:can_generate_trivial_fake_impl Return return:yes Assign If Call call:_dispatch_has_kernel_for_dispatch_key Return return:yes Assign Call call:_maybe_get_opdef If Compare op:Is If Call call:_dispatch_has_kernel_for_dispatch_key Return return:yes Assign Call call:find If Compare op:IsNot Return return:yes If Call call:_dispatch_has_kernel_for_dispatch_key Return return:yes If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "name", - "source_code": "@property def name(self) -> str: raise AbstractMethodError(self)", - "docstring": "A string identifying the data type. Will be used for display in, e.g. ``", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\base.py", - "ast_data": "FunctionDef name:name arguments arg:self Raise raises:AbstractMethodError(self)" - }, - { - "library": "tensorflow", - "name": "SaveableCompatibilityConverter", - "source_code": "class SaveableCompatibilityConverter(trackable.Trackable): __slots__ = ('_obj', '_saveables') def __init__(self, obj, saveables): self._obj = obj self._saveables = saveables @property def obj(self): return self._obj @property def saveables(self): return self._saveables def _serialize_to_tensors(self): return saveable_object_to_tensor_dict(self.saveables) def _restore_from_tensors(self, restored_tensors): expected_keys = [] for saveable in self.saveables: expected_keys.extend((trackable_utils.extract_local_name(_convert_to_string(spec.name)) for spec in saveable.specs)) if set(expected_keys) ! = restored_tensors.keys(): raise ValueError(f'Could not restore object {self._obj} because not all expected tensors were in the checkpoint.\\n\\tExpected: {expected_keys}\\n\\tGot: {list(restored_tensors.keys())}') return saveable_object_to_restore_fn(self.saveables)(restored_tensors)", - "docstring": "Converts object's to functions used in TF2 checkpointing. A class that converts a Trackable object's to save and restore functions with the same signatures as and . 
This class also produces a method for filling the object proto.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", - "ast_data": "ClassDef name:SaveableCompatibilityConverter Assign FunctionDef name:__init__ arguments arg:self arg:obj arg:saveables Assign Assign FunctionDef name:obj arguments arg:self Return return:yes FunctionDef name:saveables arguments arg:self Return return:yes FunctionDef name:_serialize_to_tensors arguments arg:self Return return:yes FunctionDef name:_restore_from_tensors arguments arg:self arg:restored_tensors Assign For If Compare op:NotEq Raise raises:ValueError(f'Could not restore object {self._obj} because not all expected tensors were in the checkpoint.\\n\\tExpected: {expected_keys}\\n\\tGot: {list(restored_tensors.keys())}') Return return:yes" - }, - { - "library": "pytorch", - "name": "range_pop", - "source_code": "def range_pop(): return _itt.rangePop()", - "docstring": "Pops a range off of a stack of nested range spans. Returns the zero-based depth of the range that is ended.", - "type": "function", - "file_path": "pytorch\\torch\\profiler\\itt.py", - "ast_data": "FunctionDef name:range_pop arguments Return return:yes" - }, - { - "library": "django", - "name": "unapply", - "source_code": "def unapply(self, project_state, schema_editor, collect_sql = False): to_run = [] new_state = project_state for operation in self.operations: if not operation.reversible: raise IrreversibleError('Operation %s in %s is not reversible' % (operation, self)) new_state = new_state.clone() old_state = new_state.clone() operation.state_forwards(self.app_label, new_state) to_run.insert(0, (operation, old_state, new_state)) for operation, to_state, from_state in to_run: if collect_sql: schema_editor.collected_sql.append('--') schema_editor.collected_sql.append('-- %s' % operation.describe()) schema_editor.collected_sql.append('--') if not operation.reduces_to_sql: schema_editor.collected_sql.append('-- THIS OPERATION CANNOT BE WRITTEN AS SQL') continue collected_sql_before = len(schema_editor.collected_sql) atomic_operation = operation.atomic or (self.atomic and operation.atomic is not False) if not schema_editor.atomic_migration and atomic_operation: with atomic(schema_editor.connection.alias): operation.database_backwards(self.app_label, schema_editor, from_state, to_state) else: operation.database_backwards(self.app_label, schema_editor, from_state, to_state) if collect_sql and collected_sql_before = = len(schema_editor.collected_sql): schema_editor.collected_sql.append('-- (no-op)') return project_state", - "docstring": "Take a project_state representing all migrations prior to this one and a schema_editor for a live database and apply the migration in a reverse order. The backwards migration process consists of two phases: 1. The intermediate states from right before the first until right after the last operation inside this migration are preserved. 2. 
The operations are applied in reverse order using the states recorded in step 1.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\migration.py", - "ast_data": "FunctionDef name:unapply arguments arg:self arg:project_state arg:schema_editor arg:collect_sql Assign Assign For If Raise raises:IrreversibleError('Operation %s in %s is not reversible' % (operation, self)) Assign Call call:clone Assign Call call:clone For If If Assign Call call:len Assign BoolOp BoolOp Compare op:IsNot If BoolOp With If BoolOp Compare op:Eq Return return:yes" - }, - { - "library": "scikit-learn", - "name": "asarray", - "source_code": "def asarray(obj: complex | NestedSequence[complex] | Array | SupportsBufferProtocol, /, *, dtype: DType | None = None, device: Device | None = None, copy: py_bool | None = None, **kwargs: object) -> Array: _helpers._check_device(da, device) if isinstance(obj, da.Array): if dtype is not None and dtype ! = obj.dtype: if copy is False: raise ValueError('Unable to avoid copy when changing dtype') obj = obj.astype(dtype) return obj.copy() if copy else obj if copy is False: raise ValueError('Unable to avoid copy when converting a non-dask object to dask') obj = np.array(obj, dtype = dtype, copy = True) return da.from_array(obj)", - "docstring": "Array API compatibility wrapper for asarray(). See the corresponding documentation in the array library and/or the array API specification for more details.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py", - "ast_data": "FunctionDef name:asarray arguments kwarg:kwargs If Call call:isinstance If BoolOp Compare op:IsNot Compare op:NotEq If Compare op:Is Raise raises:ValueError('Unable to avoid copy when changing dtype') Assign Call call:astype Return return:yes If Compare op:Is Raise raises:ValueError('Unable to avoid copy when converting a non-dask object to dask') Assign Call call:array Return return:yes" - }, - { - "library": "matplotlib", - "name": "flipy", - "source_code": "def flipy(self): return True", - "docstring": "Return whether y values increase from top to bottom. Note that this only affects drawing of texts.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:flipy arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "average_parameters_or_parameter_groups", - "source_code": "def average_parameters_or_parameter_groups(params: Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]], process_group: ProcessGroup): average_parameters(iter(get_params_to_average(params)), process_group)", - "docstring": "Averages parameters of a model or parameter groups of an optimizer.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\algorithms\\model_averaging\\utils.py", - "ast_data": "FunctionDef name:average_parameters_or_parameter_groups arguments arg:params type:Union[Iterable[torch.nn.Parameter], Iterable[dict[str, torch.nn.Parameter]]] arg:process_group type:ProcessGroup" - }, - { - "library": "pytorch", - "name": "half", - "source_code": "def half(self) -> Self: return self._apply(lambda t: t.half() if t.is_floating_point() else t)", - "docstring": "Casts all floating point parameters and buffers to `` datatype. .. note:: This method modifies the module in-place. 
Returns: Module: self", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:half arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "full", - "source_code": "def full(sharding_spec: ShardingSpec, size, fill_value, *, dtype = None, layout = torch.strided, requires_grad = False, pin_memory = False, memory_format = torch.contiguous_format, process_group = None, init_rrefs = False) -> ShardedTensor: sharded_tensor = ShardedTensor(sharding_spec, *size, dtype = dtype, layout = layout, requires_grad = requires_grad, pin_memory = pin_memory, memory_format = memory_format, process_group = process_group, init_rrefs = init_rrefs) torch.nn.init.constant_(sharded_tensor, fill_value) return sharded_tensor", - "docstring": "Creates a :class: filled with fill_value. The tensor's dtype is inferred from fill_value. If dtype is specified, it will override the inferred type from fill_value. Needs to be called on all ranks in an SPMD fashion. Args: sharding_spec (:class:): The specification describing how to shard the Tensor. size (int...): a list, tuple, or of integers defining the shape of the output tensor. fill_value (Scalar) - the value to fill the output tensor with. Keyword args: dtype (:class:, optional): the desired data type of returned tensor. Default: if `torch.set_default_dtypetorch.layouttorch.distributed.rpc.RRefShardedTensor` object on each rank", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py", - "ast_data": "FunctionDef name:full arguments arg:sharding_spec type:ShardingSpec arg:size arg:fill_value Assign Call call:ShardedTensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "on_predict_batch_end", - "source_code": "@doc_controls.for_subclass_implementers @generic_utils.default def on_predict_batch_end(self, batch, logs = None): pass", - "docstring": "Called at the end of a batch in methods. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "FunctionDef name:on_predict_batch_end arguments arg:self arg:batch arg:logs" - }, - { - "library": "pytorch", - "name": "is_sharded", - "source_code": "def is_sharded(self) -> bool: return any((placement.is_shard() for placement in self.placements))", - "docstring": "return True if the current DTensorSpec is sharded on any mesh dims (devices)", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\tensor\\_dtensor_spec.py", - "ast_data": "FunctionDef name:is_sharded arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "get_template_names", - "source_code": "def get_template_names(self): if self.template_name is None: raise ImproperlyConfigured(\"TemplateResponseMixin requires either a definition of 'template_name' or an implementation of 'get_template_names()'\") else: return [self.template_name]", - "docstring": "Return a list of template names to be used for the request. Must return a list. 
May not be called if render_to_response() is overridden.", - "type": "method", - "file_path": "django\\django\\views\\generic\\base.py", - "ast_data": "FunctionDef name:get_template_names arguments arg:self If Compare op:Is Raise raises:ImproperlyConfigured(\"TemplateResponseMixin requires either a definition of 'template_name' or an implementation of 'get_template_names()'\") Return return:yes" - }, - { - "library": "scikit-learn", - "name": "loads", - "source_code": "def loads(s, encode_nominal = False, return_type = DENSE): decoder = ArffDecoder() return decoder.decode(s, encode_nominal = encode_nominal, return_type = return_type)", - "docstring": "Convert a string instance containing the ARFF document into a Python object. :param s: a string object. :param encode_nominal: boolean, if True perform a label encoding while reading the .arff file. :param return_type: determines the data structure used to store the dataset. Can be one of , , , or . Consult the sections on _ and _. :return: a dictionary.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\_arff.py", - "ast_data": "FunctionDef name:loads arguments arg:s arg:encode_nominal arg:return_type Assign Call call:ArffDecoder Return return:yes" - }, - { - "library": "kornia", - "name": "RgbToHls", - "source_code": "class RgbToHls(Module): ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1] ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1] def forward(self, image: Tensor) -> Tensor: return rgb_to_hls(image)", - "docstring": "Convert an image from RGB to HLS. The image data is assumed to be in the range of (0, 1). Returns: HLS version of the image. Shape: - image: :math: - output: :math: Examples: >>> input = torch.rand(2, 3, 4, 5) >>> hls = RgbToHls() >>> output = hls(input) # 2x3x4x5", - "type": "class", - "file_path": "kornia\\kornia\\color\\hls.py", - "ast_data": "ClassDef name:RgbToHls FunctionDef name:forward arguments arg:self arg:image type:Tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "TFDataServiceConfig", - "source_code": "@dataclasses.dataclass class TFDataServiceConfig: dispatcher_address: str job_name: str", - "docstring": "Specifies the tf.data service configuration to use. Attributes: dispatcher_address: a string specifying the address of the tf.data service dispatcher server. job_name: a non-empty string identifying the shared job that will be created on tf.data service to process this dataset.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\input_util.py", - "ast_data": "ClassDef name:TFDataServiceConfig" - }, - { - "library": "pytorch", - "name": "get_log_level_pairs", - "source_code": "def get_log_level_pairs(self): return self.log_qname_to_level.items()", - "docstring": "Returns all qualified module names for which the user requested explicit logging settings. .. 
warning: This function used to return all loggers, regardless of whether or not the user specified them or not; it now only returns logs which were explicitly mentioned by the user (and torch, which always is implicitly requested when we initialize our logging subsystem.)", - "type": "method", - "file_path": "pytorch\\torch\\_logging\\_internal.py", - "ast_data": "FunctionDef name:get_log_level_pairs arguments arg:self Return return:yes" - }, - { - "library": "pygame", - "name": "add_internal", - "source_code": "def add_internal(self, sprite, layer = None): if not hasattr(sprite, 'dirty'): raise AttributeError() if not hasattr(sprite, 'visible'): raise AttributeError() if not hasattr(sprite, 'blendmode'): raise AttributeError() if not isinstance(sprite, DirtySprite): raise TypeError() if sprite.dirty = = 0: sprite.dirty = 1 LayeredUpdates.add_internal(self, sprite, layer)", - "docstring": "Do not use this method directly. It is used by the group to add a sprite internally.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:add_internal arguments arg:self arg:sprite arg:layer If Raise raises:AttributeError() If Raise raises:AttributeError() If Raise raises:AttributeError() If Raise raises:TypeError() If Compare op:Eq Assign" - }, - { - "library": "pygame", - "name": "set_cursor", - "source_code": "def set_cursor(*args): cursor = Cursor(*args) pygame.mouse._set_cursor(**{cursor.type: cursor.data})", - "docstring": "set_cursor(pygame.cursors.Cursor OR args for a pygame.cursors.Cursor) -> None set the mouse cursor to a new cursor", - "type": "function", - "file_path": "pygame\\src_py\\cursors.py", - "ast_data": "FunctionDef name:set_cursor arguments vararg:args Assign Call call:Cursor" - }, - { - "library": "scikit-learn", - "name": "autolabel_auc", - "source_code": "def autolabel_auc(rects, ax): for rect in rects: height = rect.get_height() ax.text(rect.get_x() + rect.get_width() / 2.0, 1.05 * height, '%.3f' % height, ha = 'center', va = 'bottom')", - "docstring": "Attach a text label above each bar displaying its height.", - "type": "function", - "file_path": "scikit-learn\\benchmarks\\bench_online_ocsvm.py", - "ast_data": "FunctionDef name:autolabel_auc arguments arg:rects arg:ax For Assign Call call:get_height" - }, - { - "library": "numpy", - "name": "count", - "source_code": "def count(self, sub, start = 0, end = None): return count(self, sub, start, end)", - "docstring": "Returns an array with the number of non-overlapping occurrences of substring in the range [, ]. See Also -------- char.count", - "type": "method", - "file_path": "numpy\\numpy\\_core\\defchararray.py", - "ast_data": "FunctionDef name:count arguments arg:self arg:sub arg:start arg:end Return return:yes" - }, - { - "library": "matplotlib", - "name": "format_ydata", - "source_code": "def format_ydata(self, y): return (self.fmt_ydata if self.fmt_ydata is not None else self.yaxis.get_major_formatter().format_data_short)(y)", - "docstring": "Return *y* formatted as a y-value. 
This function will use the attribute if it is not None, else will fall back on the yaxis major formatter.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:format_ydata arguments arg:self arg:y Return return:yes" - }, - { - "library": "kornia", - "name": "warp_grid", - "source_code": "def warp_grid(grid: Tensor, src_homo_dst: Tensor) -> Tensor: batch_size: int = src_homo_dst.size(0) _, height, width, _ = grid.size() grid = grid.expand(batch_size, -1, -1, -1) if len(src_homo_dst.shape) = = 3: src_homo_dst = src_homo_dst.view(batch_size, 1, 3, 3) flow: Tensor = transform_points(src_homo_dst, grid.to(src_homo_dst)) return flow.view(batch_size, height, width, 2)", - "docstring": "Compute the grid to warp the coordinates grid by the homography/ies. Args: grid: Unwrapped grid of the shape :math:. src_homo_dst: Homography or homographies (stacked) to transform all points in the grid. Shape of the homography has to be :math: or :math:. Returns: the transformed grid of shape :math:.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\transform\\imgwarp.py", - "ast_data": "FunctionDef name:warp_grid arguments arg:grid type:Tensor arg:src_homo_dst type:Tensor Assign Call call:size Assign Call call:expand If Compare op:Eq Assign Call call:view Return return:yes" - }, - { - "library": "numpy", - "name": "lagmul", - "source_code": "def lagmul(c1, c2): [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c = c2 xs = c1 else: c = c1 xs = c2 if len(c) = = 1: c0 = c[0] * xs c1 = 0 elif len(c) = = 2: c0 = c[0] * xs c1 = c[1] * xs else: nd = len(c) c0 = c[-2] * xs c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = lagsub(c[-i] * xs, c1 * (nd - 1) / nd) c1 = lagadd(tmp, lagsub((2 * nd - 1) * c1, lagmulx(c1)) / nd) return lagadd(c0, lagsub(c1, lagmulx(c1)))", - "docstring": "Multiply one Laguerre series by another. Returns the product of two Laguerre series * . The arguments are sequences of coefficients, from lowest order \"term\" to highest, e.g., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Laguerre series coefficients ordered from low to high. Returns ------- out : ndarray Of Laguerre series coefficients representing their product. See Also -------- lagadd, lagsub, lagmulx, lagdiv, lagpow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Laguerre polynomial basis set. Thus, to express the product as a Laguerre series, it is necessary to \"reproject\" the product onto said basis set, which may produce \"unintuitive\" (but correct) results; see Examples section below. 
Examples -------- >>> from numpy.polynomial.laguerre import lagmul >>> lagmul([1, 2, 3], [0, 1, 2]) array([ 8., -13., 38., -51., 36.])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\laguerre.py", - "ast_data": "FunctionDef name:lagmul arguments arg:c1 arg:c2 Assign Call call:as_series If Compare op:Gt Assign Assign Assign Assign If Compare op:Eq Assign Assign If Compare op:Eq Assign Assign Assign Call call:len Assign Assign For Call call:range Assign Assign Assign Call call:lagsub Assign Call call:lagadd Return return:yes" - }, - { - "library": "pytorch", - "name": "unpackage_script_module", - "source_code": "def unpackage_script_module(importer: PackageImporter, script_module_id: str) -> torch.nn.Module: if not isinstance(importer.zip_reader, torch._C.PyTorchFileReader): raise RuntimeError('Loading ScriptObjects from a PackageImporter created from a directory is not supported. Use a package archive file instead.') cu = torch._C.CompilationUnit() cpp_module = torch._C._import_ir_module_from_package(cu, importer.zip_reader, importer.storage_context, validate_map_location(importer.last_map_location), script_module_id) return wrap_cpp_module(cpp_module)", - "docstring": "Call by `` archive.", - "type": "function", - "file_path": "pytorch\\torch\\jit\\_script.py", - "ast_data": "FunctionDef name:unpackage_script_module arguments arg:importer type:PackageImporter arg:script_module_id type:str If Raise raises:RuntimeError('Loading ScriptObjects from a PackageImporter created from a directory is not supported. Use a package archive file instead.') Assign Call call:CompilationUnit Assign Call call:_import_ir_module_from_package Return return:yes" - }, - { - "library": "django", - "name": "get", - "source_code": "def get(self, field): field_name = getattr(field, 'name', field) return self[field_name].value", - "docstring": "Return the value of the field, instead of an instance of the Field object. May take a string of the field name or a Field object as parameters.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\feature.py", - "ast_data": "FunctionDef name:get arguments arg:self arg:field Assign Call call:getattr Return return:yes" - }, - { - "library": "django", - "name": "add_error", - "source_code": "def add_error(self, field, error): if not isinstance(error, ValidationError): error = ValidationError(error) if hasattr(error, 'error_dict'): if field is not None: raise TypeError('The argument `field` must be `None` when the `error` argument contains errors for multiple fields.') else: error = error.error_dict else: error = {field or NON_FIELD_ERRORS: error.error_list} for field, error_list in error.items(): if field not in self.errors: if field ! = NON_FIELD_ERRORS and field not in self.fields: raise ValueError(\"'%s' has no field named '%s'.\" % (self.__class__.__name__, field)) if field = = NON_FIELD_ERRORS: self._errors[field] = self.error_class(error_class = 'nonfield', renderer = self.renderer) else: self._errors[field] = self.error_class(renderer = self.renderer, field_id = self[field].auto_id) self._errors[field].extend(error_list) if field in self.cleaned_data: del self.cleaned_data[field]", - "docstring": "Update the content of . The argument is the name of the field to which the errors should be added. If it's None, treat the errors as NON_FIELD_ERRORS. The argument can be a single error, a list of errors, or a dictionary that maps field names to lists of errors. 
An \"error\" can be either a simple string or an instance of ValidationError with its message attribute set and a \"list or dictionary\" can be an actual or or an instance of ValidationError with its or attribute set. If is a dictionary, the argument *must* be None and errors will be added to the fields that correspond to the keys of the dictionary.", - "type": "method", - "file_path": "django\\django\\forms\\forms.py", - "ast_data": "FunctionDef name:add_error arguments arg:self arg:field arg:error If Assign Call call:ValidationError If Call call:hasattr If Compare op:IsNot Raise raises:TypeError('The argument `field` must be `None` when the `error` argument contains errors for multiple fields.') Assign Assign For Call call:items If Compare op:NotIn If BoolOp Compare op:NotEq Compare op:NotIn Raise raises:ValueError(\"'%s' has no field named '%s'.\" % (self.__class__.__name__, field)) If Compare op:Eq Assign Call call:error_class Assign Call call:error_class If Compare op:In" - }, - { - "library": "matplotlib", - "name": "set_params", - "source_code": "def set_params(self, **kwargs): if 'nbins' in kwargs: self._nbins = kwargs.pop('nbins') if self._nbins ! = 'auto': self._nbins = int(self._nbins) if 'symmetric' in kwargs: self._symmetric = kwargs.pop('symmetric') if 'prune' in kwargs: prune = kwargs.pop('prune') _api.check_in_list(['upper', 'lower', 'both', None], prune = prune) self._prune = prune if 'min_n_ticks' in kwargs: self._min_n_ticks = max(1, kwargs.pop('min_n_ticks')) if 'steps' in kwargs: steps = kwargs.pop('steps') if steps is None: self._steps = np.array([1, 1.5, 2, 2.5, 3, 4, 5, 6, 8, 10]) else: self._steps = self._validate_steps(steps) self._extended_steps = self._staircase(self._steps) if 'integer' in kwargs: self._integer = kwargs.pop('integer') if kwargs: raise _api.kwarg_error('set_params', kwargs)", - "docstring": "Set parameters for this locator. 
Parameters ---------- nbins : int or 'auto', optional see steps : array-like, optional see integer : bool, optional see symmetric : bool, optional see prune : {'lower', 'upper', 'both', None}, optional see min_n_ticks : int, optional see", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:set_params arguments arg:self kwarg:kwargs If Compare op:In Assign Call call:pop If Compare op:NotEq Assign Call call:int If Compare op:In Assign Call call:pop If Compare op:In Assign Call call:pop Assign If Compare op:In Assign Call call:max If Compare op:In Assign Call call:pop If Compare op:Is Assign Call call:array Assign Call call:_validate_steps Assign Call call:_staircase If Compare op:In Assign Call call:pop If Raise raises:_api.kwarg_error('set_params', kwargs)" - }, - { - "library": "feincms", - "name": "get_extra_context", - "source_code": "def get_extra_context(self, request): extra_context = {'request': request, 'model': self.model, 'available_templates': getattr(self.model, '_feincms_templates', ()), 'has_parent_attribute': hasattr(self.model, 'parent'), 'content_types': self.get_content_type_map(request), 'FEINCMS_CONTENT_FIELDSET_NAME': FEINCMS_CONTENT_FIELDSET_NAME} for processor in self.model.feincms_item_editor_context_processors: extra_context.update(processor(request)) return extra_context", - "docstring": "Return extra context parameters for add/change views.", - "type": "method", - "file_path": "feincms\\feincms\\admin\\item_editor.py", - "ast_data": "FunctionDef name:get_extra_context arguments arg:self arg:request Assign For Return return:yes" - }, - { - "library": "matplotlib", - "name": "remove", - "source_code": "def remove(self): if hasattr(self.ax, '_colorbar_info'): parents = self.ax._colorbar_info['parents'] for a in parents: if self.ax in a._colorbars: a._colorbars.remove(self.ax) self.ax.remove() self.mappable.callbacks.disconnect(self.mappable.colorbar_cid) self.mappable.colorbar = None self.mappable.colorbar_cid = None self.ax.callbacks.disconnect(self._extend_cid1) self.ax.callbacks.disconnect(self._extend_cid2) try: ax = self.mappable.axes except AttributeError: return try: subplotspec = self.ax.get_subplotspec().get_gridspec()._subplot_spec except AttributeError: pos = ax.get_position(original = True) ax._set_position(pos) else: ax.set_subplotspec(subplotspec)", - "docstring": "Remove this colorbar from the figure. 
If the colorbar was created with `` the previous gridspec is restored.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", - "ast_data": "FunctionDef name:remove arguments arg:self If Call call:hasattr Assign For If Compare op:In Assign Assign Try Assign ExceptHandler Return return:no Try Assign ExceptHandler Assign Call call:get_position" - }, - { - "library": "pandas", - "name": "evaluate", - "source_code": "def evaluate(self, env, engine: str, parser, term_type, eval_in_python): if engine = = 'python': res = self(env) else: left = self.lhs.evaluate(env, engine = engine, parser = parser, term_type = term_type, eval_in_python = eval_in_python) right = self.rhs.evaluate(env, engine = engine, parser = parser, term_type = term_type, eval_in_python = eval_in_python) if self.op in eval_in_python: res = self.func(left.value, right.value) else: from pandas.core.computation.eval import eval res = eval(self, local_dict = env, engine = engine, parser = parser) name = env.add_tmp(res) return term_type(name, env = env)", - "docstring": "Evaluate a binary operation *before* being passed to the engine. Parameters ---------- env : Scope engine : str parser : str term_type : type eval_in_python : list Returns ------- term_type The \"pre-evaluated\" expression as an instance of ``", - "type": "method", - "file_path": "pandas\\pandas\\core\\computation\\ops.py", - "ast_data": "FunctionDef name:evaluate arguments arg:self arg:env arg:engine type:str arg:parser arg:term_type arg:eval_in_python If Compare op:Eq Assign Call call:self Assign Call call:evaluate Assign Call call:evaluate If Compare op:In Assign Call call:func Assign Call call:eval Assign Call call:add_tmp Return return:yes" - }, - { - "library": "scipy", - "name": "isintlike", - "source_code": "def isintlike(x) -> bool: if np.ndim(x) ! = 0: return False try: operator.index(x) except (TypeError, ValueError): try: loose_int = bool(int(x) = = x) except (TypeError, ValueError): return False if loose_int: msg = 'Inexact indices into sparse matrices are not allowed' raise ValueError(msg) return loose_int return True", - "docstring": "Is x appropriate as an index into a sparse matrix? Returns True if it can be cast safely to a machine int.", - "type": "function", - "file_path": "scipy\\scipy\\sparse\\_sputils.py", - "ast_data": "FunctionDef name:isintlike arguments arg:x If Compare op:NotEq Return return:yes Try ExceptHandler Try Assign Call call:bool ExceptHandler Return return:yes If Assign Raise raises:ValueError(msg) Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "gen_list_name", - "source_code": "def gen_list_name() -> Generator[bytes, None, None]: yield from _LIST_NAMES counter = itertools.count(1000) while True: yield (str(next(counter)) + '\\x00').encode('utf8')", - "docstring": "Generate \"keys\" for encoded lists in the sequence b\"0\u0000\", b\"1\u0000\", b\"2\u0000\", ... The first 1000 keys are returned from a pre-built cache. 
All subsequent keys are generated on the fly.", - "type": "function", - "file_path": "mongo\\bson\\__init__.py", - "ast_data": "FunctionDef name:gen_list_name arguments Assign Call call:count While" - }, - { - "library": "pytorch", - "name": "pre_save", - "source_code": "def pre_save(self): check_metadata_cacheable(self.runtime_metadata) self.compiled_fw.pre_save() if self.compiled_bw is not None: self.compiled_bw.pre_save()", - "docstring": "Perform any preparations to make the cache entry ready for serialization.", - "type": "method", - "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\autograd_cache.py", - "ast_data": "FunctionDef name:pre_save arguments arg:self If Compare op:IsNot" - }, - { - "library": "tensorflow", - "name": "executing_eagerly_v1", - "source_code": "@tf_export(v1 = ['executing_eagerly']) def executing_eagerly_v1(): return executing_eagerly()", - "docstring": "Checks whether the current thread has eager execution enabled. Eager execution is typically enabled via , but may also be enabled within the context of a Python function via tf.contrib.eager.py_func. When eager execution is enabled, returns in most cases. However, this API might return in the following use cases. * Executing inside , unless under or is previously called. * Executing inside a transformation function for . * is called. >>> tf.compat.v1.enable_eager_execution() General case: >>> print(tf.executing_eagerly()) True Inside : >>> @tf.function ... def fn(): ... with tf.init_scope(): ... print(tf.executing_eagerly()) ... print(tf.executing_eagerly()) >>> fn() True False Inside after is called: >>> tf.config.run_functions_eagerly(True) >>> @tf.function ... def fn(): ... with tf.init_scope(): ... print(tf.executing_eagerly()) ... print(tf.executing_eagerly()) >>> fn() True True >>> tf.config.run_functions_eagerly(False) Inside a transformation function for : >>> def data_fn(x): ... print(tf.executing_eagerly()) ... return x >>> dataset = tf.data.Dataset.range(100) >>> dataset = dataset.map(data_fn) False Returns: if the current thread has eager execution enabled.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:executing_eagerly_v1 arguments Call call:tf_export Return return:yes" - }, - { - "library": "pytorch", - "name": "SaliencyPruner", - "source_code": "class SaliencyPruner(BaseStructuredSparsifier): def update_mask(self, module, tensor_name, **kwargs): weights = getattr(module, tensor_name) mask = getattr(module.parametrizations, tensor_name)[0].mask if weights.dim() < = 1: raise Exception('Structured pruning can only be applied to a 2+dim weight tensor!') saliency = -weights.norm(dim = tuple(range(1, weights.dim())), p = 1) assert saliency.shape = = mask.shape num_to_pick = int(len(mask) * kwargs['sparsity_level']) prune = saliency.topk(num_to_pick).indices mask.data[prune] = False", - "docstring": "Prune rows based on the saliency (L1 norm) of each row. This pruner works on N-Dimensional weight tensors. For each row, we will calculate the saliency, whic is the sum the L1 norm of all weights in that row. We expect that the resulting saliency vector has the same shape as our mask. 
We then pick elements to remove until we reach the target sparsity_level.", - "type": "class", - "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\pruner\\saliency_pruner.py", - "ast_data": "ClassDef name:SaliencyPruner FunctionDef name:update_mask arguments arg:self arg:module arg:tensor_name kwarg:kwargs Assign Call call:getattr Assign If Compare op:LtE Raise raises:Exception('Structured pruning can only be applied to a 2+dim weight tensor!') Assign Assign Call call:int Assign Assign" - }, - { - "library": "matplotlib", - "name": "new_locator", - "source_code": "def new_locator(self, ny, ny1 = None): return super().new_locator(0, ny, 0, ny1)", - "docstring": "Create an axes locator callable for the specified cell. Parameters ---------- ny, ny1 : int Integers specifying the row-position of the cell. When *ny1* is None, a single *ny*-th row is specified. Otherwise, location of rows spanning between *ny* to *ny1* (but excluding *ny1*-th row) is specified.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", - "ast_data": "FunctionDef name:new_locator arguments arg:self arg:ny arg:ny1 Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_variable", - "source_code": "def get_variable(self, feature_column, name): if name in self._cols_to_vars_map[feature_column]: return self._cols_to_vars_map[feature_column][name] raise ValueError('Variable does not exist.')", - "docstring": "Returns an existing variable. Args: feature_column: A object this variable corresponds to. name: variable name.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", - "ast_data": "FunctionDef name:get_variable arguments arg:self arg:feature_column arg:name If Compare op:In Return return:yes Raise raises:ValueError('Variable does not exist.')" - }, - { - "library": "matplotlib", - "name": "clip_children", - "source_code": "@property def clip_children(self): return self._clip_children", - "docstring": "If the children of this DrawingArea should be clipped by DrawingArea bounding box.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", - "ast_data": "FunctionDef name:clip_children arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_meta_graph_def_from_tags", - "source_code": "def get_meta_graph_def_from_tags(self, tags): found_match = False meta_graph_def_to_load = None available_tags = [] for meta_graph_def in self._saved_model.meta_graphs: available_tags.append(set(meta_graph_def.meta_info_def.tags)) if set(meta_graph_def.meta_info_def.tags) = = set(tags): meta_graph_def_to_load = meta_graph_def found_match = True break if not found_match: raise RuntimeError(f\"MetaGraphDef associated with tags {str(tags).strip('[]')} could not be found in SavedModel, with available tags '{available_tags}'. To inspect available tag-sets in the SavedModel, please use the SavedModel CLI: `saved_model_cli`.\") return meta_graph_def_to_load", - "docstring": "Return MetaGraphDef with the exact specified tags. Args: tags: A list or set of string tags that identify the MetaGraphDef. Returns: MetaGraphDef with the same tags. 
Raises: RuntimeError: if no metagraphs were found with the associated tags.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\loader_impl.py", - "ast_data": "FunctionDef name:get_meta_graph_def_from_tags arguments arg:self arg:tags Assign Assign Assign For If Compare op:Eq Assign Assign If Raise raises:RuntimeError(f\"MetaGraphDef associated with tags {str(tags).strip('[]')} could not be found in SavedModel, with available tags '{available_tags}'. To inspect available tag-sets in the SavedModel, please use the SavedModel CLI: `saved_model_cli`.\") Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_closed", - "source_code": "def set_closed(self, closed): if self._closed = = bool(closed): return self._closed = bool(closed) self.set_xy(self.get_xy()) self.stale = True", - "docstring": "Set whether the polygon is closed. Parameters ---------- closed : bool True if the polygon is closed", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_closed arguments arg:self arg:closed If Compare op:Eq Return return:no Assign Call call:bool Assign" - }, - { - "library": "virtualenv", - "name": "read_data", - "source_code": "def read_data(file, endian, num = 1): res = struct.unpack(endian + 'L' * num, file.read(num * 4)) if len(res) = = 1: return res[0] return res", - "docstring": "Read a given number of 32-bits unsigned integers from the given file with the given endianness.", - "type": "function", - "file_path": "virtualenv\\src\\virtualenv\\create\\via_global_ref\\builtin\\cpython\\mac_os.py", - "ast_data": "FunctionDef name:read_data arguments arg:file arg:endian arg:num Assign Call call:unpack If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "dimension_size", - "source_code": "def dimension_size(self, axis): if not isinstance(axis, int): raise TypeError('axis must be an integer') partitioned_ndims = len(self._partitioned_dim_sizes) if axis < partitioned_ndims: return self._partitioned_dim_sizes[axis] else: return self._inner_dim_sizes[axis - partitioned_ndims]", - "docstring": "Returns the size of slices across the specified dimension.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", - "ast_data": "FunctionDef name:dimension_size arguments arg:self arg:axis If Raise raises:TypeError('axis must be an integer') Assign Call call:len If Compare op:Lt Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_figure", - "source_code": "def set_figure(self, fig): no_switch = 'The parent and root figures of a (Sub)Figure are set at instantiation and cannot be changed.' if fig is self._root_figure: _api.warn_deprecated('3.10', message = f'{no_switch} From Matplotlib 3.12 this operation will raise an exception.') return raise ValueError(no_switch)", - "docstring": ".. deprecated:: 3.10 Currently this method will raise an exception if *fig* is anything other than the root this (Sub)Figure is on. 
In future it will always raise an exception.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:set_figure arguments arg:self arg:fig Assign If Compare op:Is Return return:no Raise raises:ValueError(no_switch)" - }, - { - "library": "scipy", - "name": "cho_solve_banded", - "source_code": "def cho_solve_banded(cb_and_lower, b, overwrite_b = False, check_finite = True): cb, lower = cb_and_lower return _cho_solve_banded(cb, b, lower, overwrite_b = overwrite_b, check_finite = check_finite)", - "docstring": "Solve the linear equations `cblowerb`. check_finite : bool, optional Whether to check that the input matrices contain only finite numbers. Disabling may give a performance gain, but may result in problems (crashes, non-termination) if the inputs do contain infinities or NaNs. Returns ------- x : array The solution to the system A x = b See Also -------- cholesky_banded : Cholesky factorization of a banded matrix Notes ----- .. versionadded:: 0.8.0 Examples -------- >>> import numpy as np >>> from scipy.linalg import cholesky_banded, cho_solve_banded >>> Ab = np.array([[0, 0, 1j, 2, 3j], [0, -1, -2, 3, 4], [9, 8, 7, 6, 9]]) >>> A = np.diag(Ab[0,2:], k=2) + np.diag(Ab[1,1:], k=1) >>> A = A + A.conj().T + np.diag(Ab[2, :]) >>> c = cholesky_banded(Ab) >>> x = cho_solve_banded((c, False), np.ones(5)) >>> np.allclose(A @ x - np.ones(5), np.zeros(5)) True", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_decomp_cholesky.py", - "ast_data": "FunctionDef name:cho_solve_banded arguments arg:cb_and_lower arg:b arg:overwrite_b arg:check_finite Assign Return return:yes" - }, - { - "library": "cherrypy", - "name": "ThreadManager", - "source_code": "class ThreadManager(SimplePlugin): threads = None 'A map of {thread ident: index number} pairs.' def __init__(self, bus): self.threads = {} SimplePlugin.__init__(self, bus) self.bus.listeners.setdefault('acquire_thread', set()) self.bus.listeners.setdefault('start_thread', set()) self.bus.listeners.setdefault('release_thread', set()) self.bus.listeners.setdefault('stop_thread', set()) def acquire_thread(self): thread_ident = _thread.get_ident() if thread_ident not in self.threads: i = len(self.threads) + 1 self.threads[thread_ident] = i self.bus.publish('start_thread', i) def release_thread(self): thread_ident = _thread.get_ident() i = self.threads.pop(thread_ident, None) if i is not None: self.bus.publish('stop_thread', i) def stop(self): for thread_ident, i in self.threads.items(): self.bus.publish('stop_thread', i) self.threads.clear() graceful = stop", - "docstring": "Manager for HTTP request threads. If you have control over thread creation and destruction, publish to the 'acquire_thread' and 'release_thread' channels (for each thread). This will register/unregister the current thread and publish to 'start_thread' and 'stop_thread' listeners in the bus as needed. If threads are created and destroyed by code you do not control (e.g., Apache), then, at the beginning of every HTTP request, publish to 'acquire_thread' only. You should not publish to 'release_thread' in this case, since you do not know whether the thread will be re-used or not. 
The bus will call 'stop_thread' listeners for you when it stops.", - "type": "class", - "file_path": "cherrypy\\cherrypy\\process\\plugins.py", - "ast_data": "ClassDef name:ThreadManager Assign FunctionDef name:__init__ arguments arg:self arg:bus Assign FunctionDef name:acquire_thread arguments arg:self Assign Call call:get_ident If Compare op:NotIn Assign Assign FunctionDef name:release_thread arguments arg:self Assign Call call:get_ident Assign Call call:pop If Compare op:IsNot FunctionDef name:stop arguments arg:self For Call call:items Assign" - }, - { - "library": "matplotlib", - "name": "set_center", - "source_code": "def set_center(self, xy): self._center = xy self.stale = True", - "docstring": "Set the center of the ellipse. Parameters ---------- xy : (float, float)", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_center arguments arg:self arg:xy Assign Assign" - }, - { - "library": "scipy", - "name": "Paviani", - "source_code": "class Paviani(Benchmark): def __init__(self, dimensions = 10): Benchmark.__init__(self, dimensions) self._bounds = list(zip([2.001] * self.N, [9.999] * self.N)) self.global_optimum = [[9.350266 for _ in range(self.N)]] self.fglob = -45.7784684040686 def fun(self, x, *args): self.nfev + = 1 return sum(log(x - 2) ** 2.0 + log(10.0 - x) ** 2.0) - prod(x) ** 0.2", - "docstring": "Paviani objective function. This class defines the Paviani [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Paviani}}(x) = \\sum_{i=1}^{10} \\left[\\log^{2}\\left(10 - x_i\\right) + \\log^{2}\\left(x_i -2\\right)\\right] - \\left(\\prod_{i=1}^{10} x_i^{10} \\right)^{0.2} with :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: think Gavana web/code definition is wrong because final product term shouldn't raise x to power 10.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py", - "ast_data": "ClassDef name:Paviani FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "tensorflow", - "name": "parse_expression", - "source_code": "def parse_expression(src): src = STANDARD_PREAMBLE + src.strip() node = parse(src, preamble_len = STANDARD_PREAMBLE_LEN, single_node = True) if __debug__: if not isinstance(node, gast.Expr): raise ValueError('expected exactly one node of type Expr, got {}'.format(node)) return node.value", - "docstring": "Returns the AST of given identifier. Args: src: A piece of code that represents a single Python expression Returns: A gast.AST object. 
Raises: ValueError: if src does not consist of a single Expression.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\parser.py", - "ast_data": "FunctionDef name:parse_expression arguments arg:src Assign Assign Call call:parse If If Raise raises:ValueError('expected exactly one node of type Expr, got {}'.format(node)) Return return:yes" - }, - { - "library": "kornia", - "name": "get_hanning_kernel2d", - "source_code": "def get_hanning_kernel2d(kernel_size: tuple[int, int] | int, device: Optional[Device] = None, dtype: Optional[Dtype] = None) -> Tensor: kernel_size = _unpack_2d_ks(kernel_size) _check_kernel_size(kernel_size, 2, allow_even = True) ky = get_hanning_kernel1d(kernel_size[0], device, dtype)[None].T kx = get_hanning_kernel1d(kernel_size[1], device, dtype)[None] kernel2d = ky @ kx return kernel2d", - "docstring": "Return 2d Hanning kernel, used in signal processing and KCF tracker. Args: kernel_size: The size of the kernel for the filter. It should be positive. device: tensor device desired to create the kernel dtype: tensor dtype desired to create the kernel Returns: 2D tensor with Hanning filter coefficients. Shape: math: .. math:: w(n) = 0.5 - 0.5cos\\\\left(\\\\frac{2\\\\pi{n}}{M-1}\\\\right)", - "type": "function", - "file_path": "kornia\\kornia\\filters\\kernels.py", - "ast_data": "FunctionDef name:get_hanning_kernel2d arguments arg:kernel_size type:tuple[int, int] | int arg:device type:Optional[Device] arg:dtype type:Optional[Dtype] Assign Call call:_unpack_2d_ks Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "diff", - "source_code": "def diff(self, other): r = set(self.nn_modules.keys()).difference(set(other.nn_modules.keys())) if len(r) = = 0: return None return r", - "docstring": "Produces a delta against another ModuleContextCheckpointState. Returns None if no delta is found, otherwise, return a set() of mismatched module key names.", - "type": "method", - "file_path": "pytorch\\torch\\_guards.py", - "ast_data": "FunctionDef name:diff arguments arg:self arg:other Assign Call call:difference If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "o", - "source_code": "def o(self): return self.data.isocalendar().year", - "docstring": "ISO 8601 year number matching the ISO week number (W)", - "type": "method", - "file_path": "django\\django\\utils\\dateformat.py", - "ast_data": "FunctionDef name:o arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "merge_with", - "source_code": "def merge_with(func, *dicts, **kwargs): if len(dicts) = = 1 and (not isinstance(dicts[0], Mapping)): dicts = dicts[0] factory = _get_factory(merge_with, kwargs) result = factory() for d in dicts: for k, v in d.items(): if k not in result: result[k] = [v] else: result[k].append(v) return valmap(func, result, factory)", - "docstring": "Merge dictionaries and apply function to combined values A key may occur in more than one dict, and all values mapped from the key will be passed to the function as a list, such as func([val1, val2, ...]). 
>>> merge_with(sum, {1: 1, 2: 2}, {1: 10, 2: 20}) {1: 11, 2: 22} >>> merge_with(first, {1: 1, 2: 2}, {2: 20, 3: 30}) # doctest: +SKIP {1: 1, 2: 2, 3: 30} See Also: merge", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py", - "ast_data": "FunctionDef name:merge_with arguments arg:func vararg:dicts kwarg:kwargs If BoolOp Compare op:Eq Assign Assign Call call:_get_factory Assign Call call:factory For For Call call:items If Compare op:NotIn Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "constant_to_device", - "source_code": "def constant_to_device(self, device: torch.device) -> IRNode: loader = self.make_loader() loader = patch.object(ConstantBuffer, 'override_device', device)(loader) return Pointwise(device = device, dtype = self.get_dtype(), inner_fn = loader, ranges = self.get_size())", - "docstring": "Move this to a given device. Requires that all reads are to constants.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "FunctionDef name:constant_to_device arguments arg:self arg:device type:torch.device Assign Call call:make_loader Assign Call Return return:yes" - }, - { - "library": "pytorch", - "name": "CallgrindModuleType", - "source_code": "class CallgrindModuleType(Protocol): __file__: str __name__: str def _valgrind_supported_platform(self) -> bool: ... def _valgrind_toggle(self) -> None: ...", - "docstring": "Replicates the valgrind endpoints in . These bindings are used to collect Callgrind profiles on earlier versions of PyTorch and will eventually be removed.", - "type": "class", - "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\_stubs.py", - "ast_data": "ClassDef name:CallgrindModuleType FunctionDef name:_valgrind_supported_platform arguments arg:self FunctionDef name:_valgrind_toggle arguments arg:self" - }, - { - "library": "tensorflow", - "name": "children", - "source_code": "def children(self, obj, save_type = base.SaveType.CHECKPOINT, **kwargs): children = {} for name, ref in self.list_children(obj, **kwargs): children[name] = ref return children", - "docstring": "Returns all child trackables attached to obj. Args: obj: A object. save_type: A string, can be 'savedmodel' or 'checkpoint'. **kwargs: kwargs to use when retrieving the object's children. Returns: Dictionary of all children attached to the object with name to trackable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\graph_view.py", - "ast_data": "FunctionDef name:children arguments arg:self arg:obj arg:save_type kwarg:kwargs Assign For Call call:list_children Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "set_all_reduce_hook", - "source_code": "def set_all_reduce_hook(self, hook: Callable[[torch.Tensor], None], *, stream: Optional[torch.cuda.Stream] = None): state = self._get_fsdp_state() if (fsdp_param_group: = state._fsdp_param_group) is not None: fsdp_param_group._all_reduce_hook = hook if stream is not None: if fsdp_param_group._is_hsdp: raise ValueError('stream cannot be set when using native HSDP') fsdp_param_group._all_reduce_hook_stream = stream", - "docstring": "Args: hook (Callable[[torch.Tensor], None]): User-defined all-reduce hook with expected signature `` is the reduce-scatter output if only using FSDP or the all-reduce output if using native HSDP. stream (Optional[torch.cuda.Stream]): Stream to run the all-reduce hook in. This should only be set if not using native HSDP. 
If using native HSDP, the hook will run in the internally defined all-reduce stream used by the native HSDP all-reduce.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", - "ast_data": "FunctionDef name:set_all_reduce_hook arguments arg:self arg:hook type:Callable[[torch.Tensor], None] Assign Call call:_get_fsdp_state If Compare op:IsNot Assign If Compare op:IsNot If Raise raises:ValueError('stream cannot be set when using native HSDP') Assign" - }, - { - "library": "pytorch", - "name": "broadcast_types", - "source_code": "def broadcast_types(t1, t2): if t1 = = Dyn or t2 = = Dyn or isinstance(t1, Var) or isinstance(t2, Var): return (t1, t2) if isinstance(t1, TensorType) and isinstance(t2, TensorType): s1 = len(t1.__args__) s2 = len(t2.__args__) new_t1 = list(t1.__args__) new_t2 = list(t2.__args__) if s1 > s2: for i in range(s1 - s2): new_t2.insert(0, 1) elif s2 > s1: for i in range(s2 - s1): new_t1.insert(0, 1) for i, (x, y) in enumerate(zip(new_t1, new_t2)): if x = = 1: new_t1[i] = y elif y = = 1: new_t2[i] = x t1, t2 = (TensorType(tuple(new_t1)), TensorType(tuple(new_t2))) return (t1, t2) else: raise TypeError(f'Cannot broadcast types {t1} and {t2}')", - "docstring": "Applies broadcasting to both given types such that they become consistent with eachother and returns two new resulting types", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", - "ast_data": "FunctionDef name:broadcast_types arguments arg:t1 arg:t2 If BoolOp Compare op:Eq Compare op:Eq Call call:isinstance Call call:isinstance Return return:yes If BoolOp Call call:isinstance Call call:isinstance Assign Call call:len Assign Call call:len Assign Call call:list Assign Call call:list If Compare op:Gt For Call call:range If Compare op:Gt For Call call:range For Call call:enumerate If Compare op:Eq Assign If Compare op:Eq Assign Assign Return return:yes Raise raises:TypeError(f'Cannot broadcast types {t1} and {t2}')" - }, - { - "library": "pytorch", - "name": "predict", - "source_code": "def predict(self, input: Tensor) -> Tensor: head_output = self.head(input) output = torch.argmax(head_output, dim = 1) not_in_shortlist = output > = self.shortlist_size all_in_shortlist = not not_in_shortlist.any() if all_in_shortlist: return output elif not_in_shortlist.all(): log_prob = self._get_full_log_prob(input, head_output) return torch.argmax(log_prob, dim = 1) else: log_prob = self._get_full_log_prob(input[not_in_shortlist], head_output[not_in_shortlist]) output[not_in_shortlist] = torch.argmax(log_prob, dim = 1) return output", - "docstring": "Return the class with the highest probability for each example in the input minibatch. 
This is equivalent to `(N, \\texttt{in\\_features})(N)`", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\adaptive.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:input type:Tensor Assign Call call:head Assign Call call:argmax Assign Compare op:GtE Assign If Return return:yes If Call call:all Assign Call call:_get_full_log_prob Return return:yes Assign Call call:_get_full_log_prob Assign Call call:argmax Return return:yes" - }, - { - "library": "tensorflow", - "name": "LegacyTypeSpecBatchEncoder", - "source_code": "class LegacyTypeSpecBatchEncoder(TypeSpecBatchEncoder): def batch(self, type_spec, batch_size): return type_spec._batch(batch_size) def unbatch(self, type_spec): return type_spec._unbatch() def encode(self, type_spec, value, minimum_rank = 0): if minimum_rank = = 0: return type_spec._to_tensor_list(value) elif minimum_rank = = 1: if not isinstance(type_spec, BatchableTypeSpec): raise ValueError(f'{type_spec.__name__}.encode does not support minimum_rank>0.') return type_spec._to_batched_tensor_list(value) else: raise ValueError(f'{type_spec.__name__}.encode does not support minimum_rank>1.') def decode(self, type_spec, encoded_value): return type_spec._from_tensor_list(encoded_value) def encoding_specs(self, spec): return spec._flat_tensor_specs", - "docstring": "TypeSpecBatchEncoder for legacy composite tensor classes. TODO(edloper): Update existing composite tensors to use non-legacy CompositeTensorBatchEncoders.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", - "ast_data": "ClassDef name:LegacyTypeSpecBatchEncoder FunctionDef name:batch arguments arg:self arg:type_spec arg:batch_size Return return:yes FunctionDef name:unbatch arguments arg:self arg:type_spec Return return:yes FunctionDef name:encode arguments arg:self arg:type_spec arg:value arg:minimum_rank If Compare op:Eq Return return:yes If Compare op:Eq If Raise raises:ValueError(f'{type_spec.__name__}.encode does not support minimum_rank>0.') Return return:yes Raise raises:ValueError(f'{type_spec.__name__}.encode does not support minimum_rank>1.') FunctionDef name:decode arguments arg:self arg:type_spec arg:encoded_value Return return:yes FunctionDef name:encoding_specs arguments arg:self arg:spec Return return:yes" - }, - { - "library": "matplotlib", - "name": "make_png", - "source_code": "@classmethod def make_png(cls, tex, fontsize, dpi): basefile = cls.get_basefile(tex, fontsize, dpi) pngfile = '%s.png' % basefile if not os.path.exists(pngfile): dvifile = cls.make_dvi(tex, fontsize) cmd = ['dvipng', '-bg', 'Transparent', '-D', str(dpi), '-T', 'tight', '-o', pngfile, dvifile] if getattr(mpl, '_called_from_pytest', False) and mpl._get_executable_info('dvipng').raw_version ! = '1.16': cmd.insert(1, '--freetype0') cls._run_checked_subprocess(cmd, tex) return pngfile", - "docstring": "Generate a png file containing latex's rendering of tex string. 
Return the file name.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", - "ast_data": "FunctionDef name:make_png arguments arg:cls arg:tex arg:fontsize arg:dpi Assign Call call:get_basefile Assign If Assign Call call:make_dvi Assign If BoolOp Call call:getattr Compare op:NotEq Return return:yes" - }, - { - "library": "pytorch", - "name": "impl_save_for_backward", - "source_code": "def impl_save_for_backward(self, _stacklevel = 2): def inner(f): self._check_can_register_backward() self._check_doesnt_have_library_autograd_impl() if not self._registered_autograd_kernel_indirection: self._register_autograd_kernel_indirection() self._register_impl('save_for_backward', f, stacklevel = _stacklevel) if self._has_impl('backward'): self._register_autograd_kernel() return inner", - "docstring": "Register a function that tells us what to save for backward. Please see impl_backward for more details.", - "type": "method", - "file_path": "pytorch\\torch\\_custom_op\\impl.py", - "ast_data": "FunctionDef name:impl_save_for_backward arguments arg:self arg:_stacklevel FunctionDef name:inner arguments arg:f If If Call call:_has_impl Return return:yes" - }, - { - "library": "pytorch", - "name": "add_dependency", - "source_code": "def add_dependency(self, module_name: str, dependencies = True): if module_name in self.dependency_graph and self.dependency_graph.nodes[module_name].get('provided') is True: return if module_name = = 'torch_package_importer': self.dependency_graph.add_node(module_name, action = _ModuleProviderAction.SKIP, provided = True) return if module_name = = '_mock': self.dependency_graph.add_node(module_name, action = _ModuleProviderAction.REPACKAGED_MOCK_MODULE, provided = True) return if self._can_implicitly_extern(module_name): self.dependency_graph.add_node(module_name, action = _ModuleProviderAction.EXTERN, provided = True) return for pattern, pattern_info in self.patterns.items(): if pattern.matches(module_name): pattern_info.was_matched = True self.dependency_graph.add_node(module_name, action = pattern_info.action, provided = True) if pattern_info.action = = _ModuleProviderAction.DENY: self.dependency_graph.add_node(module_name, error = PackagingErrorReason.DENIED) if pattern_info.action = = _ModuleProviderAction.INTERN: self._intern_module(module_name, dependencies) return self.dependency_graph.add_node(module_name, error = PackagingErrorReason.NO_ACTION)", - "docstring": "Given a module, add it to the dependency graph according to patterns specified by the user.", - "type": "method", - "file_path": "pytorch\\torch\\package\\package_exporter.py", - "ast_data": "FunctionDef name:add_dependency arguments arg:self arg:module_name type:str arg:dependencies If BoolOp Compare op:In Compare op:Is Return return:no If Compare op:Eq Return return:no If Compare op:Eq Return return:no If Call call:_can_implicitly_extern Return return:no For Call call:items If Call call:matches Assign If Compare op:Eq If Compare op:Eq Return return:no" - }, - { - "library": "tensorflow", - "name": "name_scope", - "source_code": "def name_scope(name, default_name = None, values = None, skip_on_eager = True) -> ContextManager[Optional[str]]: if not context.executing_eagerly(): return internal_name_scope_v1(name, default_name, values) if skip_on_eager: return NullContextmanager() name = default_name if name is None else name if values: graph_value = next((value for value in values if is_symbolic_tensor(value)), None) if graph_value is not None: return 
graph_value.graph.name_scope(name) return name_scope_v2(name or '')", - "docstring": "Internal-only entry point for . Internal ops do not use the public API and instead rely on regardless of the execution mode. This function dispatches to the correct implementation based on the arguments provided and the current mode. Specifically, * if contains a graph tensor is used; * is used in graph mode; * -- in eager mode. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the argument is . values: The list of arguments that are passed to the op function. skip_on_eager: Indicates to return NullContextmanager if executing eagerly. By default this is True since naming tensors and operations in eager mode have little use and cause unnecessary performance overhead. However, it is important to preserve variable names since they are often useful for debugging and saved models. Returns: context manager.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:name_scope arguments arg:name arg:default_name arg:values arg:skip_on_eager If Return return:yes If Return return:yes Assign If Assign Call call:next If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "moments", - "source_code": "@tf_export(v1 = ['nn.moments']) @dispatch.add_dispatch_support def moments(x, axes, shift = None, name = None, keep_dims = None, keepdims = None): keep_dims = deprecated_argument_lookup('keepdims', keepdims, 'keep_dims', keep_dims) if keep_dims is None: keep_dims = False with ops.name_scope(name, 'moments', [x, axes]): y = math_ops.cast(x, dtypes.float32) if x.dtype = = dtypes.float16 else x mean = math_ops.reduce_mean(y, axes, keepdims = True, name = 'mean') variance = math_ops.reduce_mean(math_ops.squared_difference(y, array_ops.stop_gradient(mean)), axes, keepdims = True, name = 'variance') if not keep_dims: mean = array_ops.squeeze(mean, axes) variance = array_ops.squeeze(variance, axes) if x.dtype = = dtypes.float16: return (math_ops.cast(mean, dtypes.float16), math_ops.cast(variance, dtypes.float16)) else: return (mean, variance)", - "docstring": "Calculate the mean and variance of . The mean and variance are calculated by aggregating the contents of across . If is 1-D and this is just the mean and variance of a vector. Note: shift is currently not used; the true mean is computed and used. When using these moments for batch normalization (see ): * for so-called \"global normalization\", used with convolutional filters with shape , pass . * for simple batch normalization pass (batch only). Args: x: A . axes: Array of ints. Axes along which to compute mean and variance. shift: Not used in the current implementation name: Name used to scope the operations that compute the moments. keep_dims: produce moments with the same dimensionality as the input. keepdims: Alias to keep_dims. 
Returns: Two objects: and .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_impl.py", - "ast_data": "FunctionDef name:moments arguments arg:x arg:axes arg:shift arg:name arg:keep_dims arg:keepdims Call call:tf_export Assign Call call:deprecated_argument_lookup If Compare op:Is Assign With Assign Assign Call call:reduce_mean Assign Call call:reduce_mean If Assign Call call:squeeze Assign Call call:squeeze If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "get_app_template_dirs", - "source_code": "@functools.lru_cache def get_app_template_dirs(dirname): return tuple((path for app_config in apps.get_app_configs() if app_config.path and (path: = (Path(app_config.path) / dirname)).is_dir()))", - "docstring": "Return an iterable of paths of directories to load app templates from. dirname is the name of the subdirectory containing templates inside installed applications.", - "type": "function", - "file_path": "django\\django\\template\\utils.py", - "ast_data": "FunctionDef name:get_app_template_dirs arguments arg:dirname Return return:yes" - }, - { - "library": "coconut", - "name": "__repr__", - "source_code": "def __repr__(self): if not logger.tracing: logger.warn_err(CoconutInternalException('ComputationNode.__repr__ called when not tracing')) inner_repr = '\\n'.join(('\\t' + line for line in repr(self.tokens).splitlines())) if self.pprinting: return '(\"' + self.name + '\", \\n' + inner_repr + '\\n)' else: return self.name + '(\\n' + inner_repr + '\\n)'", - "docstring": "Get a representation of the entire computation graph below this node.", - "type": "method", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:__repr__ arguments arg:self If Assign Call call:join If Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_linewidth", - "source_code": "def set_linewidth(self, lw): if lw is None: lw = self._get_default_linewidth() self._us_lw = np.atleast_1d(lw) self._linewidths, self._linestyles = self._bcast_lwls(self._us_lw, self._us_linestyles) self.stale = True", - "docstring": "Set the linewidth(s) for the collection. *lw* can be a scalar or a sequence; if it is a sequence the patches will cycle through the sequence Parameters ---------- lw : float or list of floats", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "FunctionDef name:set_linewidth arguments arg:self arg:lw If Compare op:Is Assign Call call:_get_default_linewidth Assign Call call:atleast_1d Assign Call call:_bcast_lwls Assign" - }, - { - "library": "tensorflow", - "name": "embedding_feature", - "source_code": "@property def embedding_feature(self): return HardwareFeature._embedding_feature_proto_to_string(self.tpu_hardware_feature_proto.embedding_feature)", - "docstring": "TPU embedding feature. 
Returns: An EmbeddingFeature enum.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_hardware_feature.py", - "ast_data": "FunctionDef name:embedding_feature arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "Bohachevsky2", - "source_code": "class Bohachevsky2(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return x[0] ** 2 + 2 * x[1] ** 2 - 0.3 * cos(3 * pi * x[0]) * cos(4 * pi * x[1]) + 0.3", - "docstring": "Bohachevsky 2 objective function. The Bohachevsky 2 [1]_ global optimization problem is a multimodal minimization problem defined as follows .. math:: f_{\\text{Bohachevsky}}(x) = \\sum_{i=1}^{n-1}\\left[x_i^2 + 2 x_{i+1}^2 - 0.3 \\cos(3 \\pi x_i) - 0.4 \\cos(4 \\pi x_{i + 1}) + 0.7 \\right] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: equation needs to be fixed up in the docstring. Jamil is also wrong. There should be no 0.4 factor in front of the cos term", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_B.py", - "ast_data": "ClassDef name:Bohachevsky2 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "scikit-learn", - "name": "setdiff1d", - "source_code": "def setdiff1d(x1: Array | complex, x2: Array | complex, /, *, assume_unique: bool = False, xp: ModuleType | None = None) -> Array: if xp is None: xp = array_namespace(x1, x2) x1_, x2_ = asarrays(x1, x2, xp = xp) if assume_unique: x1_ = xp.reshape(x1_, (-1,)) x2_ = xp.reshape(x2_, (-1,)) else: x1_ = xp.unique_values(x1_) x2_ = xp.unique_values(x2_) return x1_[_helpers.in1d(x1_, x2_, assume_unique = True, invert = True, xp = xp)]", - "docstring": "Find the set difference of two arrays. Return the unique values in that are not in . Parameters ---------- x1 : array | int | float | complex | bool Input array. x2 : array Input comparison array. assume_unique : bool If `x1x2x1x2assume_unique`, but otherwise only sorted if the input is sorted. 
Examples -------- >>> import array_api_strict as xp >>> import array_api_extra as xpx >>> x1 = xp.asarray([1, 2, 3, 2, 4, 1]) >>> x2 = xp.asarray([3, 4, 5, 6]) >>> xpx.setdiff1d(x1, x2, xp=xp) Array([1, 2], dtype=array_api_strict.int64)", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_funcs.py", - "ast_data": "FunctionDef name:setdiff1d arguments If Compare op:Is Assign Call call:array_namespace Assign Call call:asarrays If Assign Call call:reshape Assign Call call:reshape Assign Call call:unique_values Assign Call call:unique_values Return return:yes" - }, - { - "library": "django", - "name": "read", - "source_code": "def read(self, wkb): if isinstance(wkb, memoryview): wkb_s = bytes(wkb) return wkb_reader_read(self.ptr, wkb_s, len(wkb_s)) elif isinstance(wkb, bytes): return wkb_reader_read_hex(self.ptr, wkb, len(wkb)) elif isinstance(wkb, str): wkb_s = wkb.encode() return wkb_reader_read_hex(self.ptr, wkb_s, len(wkb_s)) else: raise TypeError", - "docstring": "Return a _pointer_ to C GEOS Geometry object from the given WKB.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\prototypes\\io.py", - "ast_data": "FunctionDef name:read arguments arg:self arg:wkb If Call call:isinstance Assign Call call:bytes Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Assign Call call:encode Return return:yes Raise raises:TypeError" - }, - { - "library": "authlib", - "name": "add_to_body", - "source_code": "def add_to_body(token, body = None): if body is None: body = '' return add_params_to_qs(body, [('access_token', token)])", - "docstring": "Add a Bearer Token to the request body. access_token=h480djs93hd8", - "type": "function", - "file_path": "authlib\\authlib\\oauth2\\rfc6750\\parameters.py", - "ast_data": "FunctionDef name:add_to_body arguments arg:token arg:body If Compare op:Is Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "loss_masks", - "source_code": "def loss_masks(self, outputs, targets, indices, num_boxes): assert 'pred_masks' in outputs src_idx = self._get_src_permutation_idx(indices) tgt_idx = self._get_tgt_permutation_idx(indices) src_masks = outputs['pred_masks'] target_masks, valid = nested_tensor_from_tensor_list([t['masks'] for t in targets]).decompose() target_masks = target_masks.to(src_masks) src_masks = src_masks[src_idx] src_masks = interpolate(src_masks[:, None], size = target_masks.shape[-2:], mode = 'bilinear', align_corners = False) src_masks = src_masks[:, 0].flatten(1) target_masks = target_masks[tgt_idx].flatten(1) losses = {'loss_mask': sigmoid_focal_loss(src_masks, target_masks, num_boxes), 'loss_dice': dice_loss(src_masks, target_masks, num_boxes)} return losses", - "docstring": "Compute the losses related to the masks: the focal loss and the dice loss. 
targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]", - "type": "method", - "file_path": "pytorch\\benchmarks\\functional_autograd_benchmark\\torchvision_models.py", - "ast_data": "FunctionDef name:loss_masks arguments arg:self arg:outputs arg:targets arg:indices arg:num_boxes Assign Call call:_get_src_permutation_idx Assign Call call:_get_tgt_permutation_idx Assign Assign Call call:decompose Assign Call call:to Assign Assign Call call:interpolate Assign Call call:flatten Assign Call call:flatten Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "structured_outputs", - "source_code": "@property def structured_outputs(self): return self._func_graph.structured_outputs", - "docstring": "Returns outputs in as returned by the original function.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", - "ast_data": "FunctionDef name:structured_outputs arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "get_split_size", - "source_code": "def get_split_size(dim_size, chunks): return (dim_size + chunks - 1) // chunks", - "docstring": "Computes the split size inline with ``. Returns: An int indicating the split size to use.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py", - "ast_data": "FunctionDef name:get_split_size arguments arg:dim_size arg:chunks Return return:yes" - }, - { - "library": "pygame", - "name": "note_off", - "source_code": "def note_off(self, note, velocity = 0, channel = 0): if not 0 < = channel < = 15: raise ValueError('Channel not between 0 and 15.') self.write_short(128 + channel, note, velocity)", - "docstring": "turns a midi note off. Note must be on. Output.note_off(note, velocity=0, channel=0) note is an integer from 0 to 127 velocity is an integer from 0 to 127 (release velocity) channel is an integer from 0 to 15 Turn a note off in the output stream. The note must already be on for this to work correctly.", - "type": "method", - "file_path": "pygame\\src_py\\midi.py", - "ast_data": "FunctionDef name:note_off arguments arg:self arg:note arg:velocity arg:channel If Raise raises:ValueError('Channel not between 0 and 15.')" - }, - { - "library": "tensorflow", - "name": "summary", - "source_code": "def summary(self, line_length = None, positions = None, print_fn = None): if not self.built: raise ValueError('This model has not yet been built. Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.') layer_utils.print_summary(self, line_length = line_length, positions = positions, print_fn = print_fn)", - "docstring": "Prints a string summary of the network. Args: line_length: Total length of printed lines (e.g. set this to adapt the display to different terminal window sizes). positions: Relative or absolute positions of log elements in each line. If not provided, defaults to . print_fn: Print function to use. Defaults to . It will be called on each line of the summary. You can set it to a custom function in order to capture the string summary. Raises: ValueError: if is called before the model is built.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", - "ast_data": "FunctionDef name:summary arguments arg:self arg:line_length arg:positions arg:print_fn If Raise raises:ValueError('This model has not yet been built. 
Build the model first by calling `build()` or calling `fit()` with some data, or specify an `input_shape` argument in the first layer(s) for automatic build.')" - }, - { - "library": "django", - "name": "do_block", - "source_code": "@register.tag('block') def do_block(parser, token): bits = token.contents.split() if len(bits) ! = 2: raise TemplateSyntaxError(\"'%s' tag takes only one argument\" % bits[0]) block_name = bits[1] try: if block_name in parser.__loaded_blocks: raise TemplateSyntaxError(\"'%s' tag with name '%s' appears more than once\" % (bits[0], block_name)) parser.__loaded_blocks.append(block_name) except AttributeError: parser.__loaded_blocks = [block_name] nodelist = parser.parse(('endblock',)) endblock = parser.next_token() acceptable_endblocks = ('endblock', 'endblock %s' % block_name) if endblock.contents not in acceptable_endblocks: parser.invalid_block_tag(endblock, 'endblock', acceptable_endblocks) return BlockNode(block_name, nodelist)", - "docstring": "Define a block that can be overridden by child templates.", - "type": "function", - "file_path": "django\\django\\template\\loader_tags.py", - "ast_data": "FunctionDef name:do_block arguments arg:parser arg:token Call call:tag Assign Call call:split If Compare op:NotEq Raise raises:TemplateSyntaxError(\"'%s' tag takes only one argument\" % bits[0]) Assign Try If Compare op:In Raise raises:TemplateSyntaxError(\"'%s' tag with name '%s' appears more than once\" % (bits[0], block_name)) ExceptHandler Assign Assign Call call:parse Assign Call call:next_token Assign If Compare op:NotIn Return return:yes" - }, - { - "library": "pandas", - "name": "is_open", - "source_code": "@property def is_open(self) -> bool: if self._handle is None: return False return bool(self._handle.isopen)", - "docstring": "return a boolean indicating whether the file is open", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:is_open arguments arg:self If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "norm_diff", - "source_code": "def norm_diff(A, norm = 2, msg = True, random_state = None): if msg: print('... computing %s norm ...' % norm) if norm = = 2: v0 = _init_arpack_v0(min(A.shape), random_state) value = sp.sparse.linalg.svds(A, k = 1, return_singular_vectors = False, v0 = v0) elif sp.sparse.issparse(A): value = sp.sparse.linalg.norm(A, ord = norm) else: value = sp.linalg.norm(A, ord = norm) return value", - "docstring": "Compute the norm diff with the original matrix, when randomized SVD is called with *params. norm: 2 => spectral; 'fro' => Frobenius", - "type": "function", - "file_path": "scikit-learn\\benchmarks\\bench_plot_randomized_svd.py", - "ast_data": "FunctionDef name:norm_diff arguments arg:A arg:norm arg:msg arg:random_state If If Compare op:Eq Assign Call call:_init_arpack_v0 Assign Call call:svds If Call call:issparse Assign Call call:norm Assign Call call:norm Return return:yes" - }, - { - "library": "pytorch", - "name": "append_step", - "source_code": "def append_step(self, step: InputAdaptStep) -> None: self._steps.append(step)", - "docstring": "Appends a step to the input adapt steps. 
Args: step: The step to append.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", - "ast_data": "FunctionDef name:append_step arguments arg:self arg:step type:InputAdaptStep" - }, - { - "library": "pytorch", - "name": "copy_args_to_cpu_if_needed", - "source_code": "def copy_args_to_cpu_if_needed(self, *args, **kwargs): if not self.optimize_mem: return {} copies = {} budget = torch.cuda.max_memory_allocated() - torch.cuda.memory_allocated() def maybe_copy(name, arg): if name in self.mutated_arg_names and arg.is_cuda: nonlocal budget assert isinstance(arg, torch.Tensor) required_storage_length = compute_required_storage_length(arg.size(), arg.stride(), 0) size = required_storage_length * arg.element_size() if size > budget: cpu_arg = torch.empty_strided((required_storage_length,), (1,), dtype = arg.dtype, device = 'cpu', pin_memory = True) cpu_arg.copy_(arg.as_strided((required_storage_length,), (1,)), non_blocking = True) copies[name] = (arg, cpu_arg) else: budget - = size for name, arg in zip(self.fn.arg_names, args): maybe_copy(name, arg) for name, arg in kwargs.items(): maybe_copy(name, arg) return copies", - "docstring": "To support benchmarking in the presence of mutated args, we need to avoid autotuning contanminating them. We try to pass cloned args to the kernel. If those clones would increase the peak memory usage, however, we instead copy to cpu and restore them after each iteration. Figure out the args to be copied and do the copying.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\runtime\\triton_heuristics.py", - "ast_data": "FunctionDef name:copy_args_to_cpu_if_needed arguments arg:self vararg:args kwarg:kwargs If Return return:yes Assign Assign FunctionDef name:maybe_copy arguments arg:name arg:arg If BoolOp Compare op:In Assign Call call:compute_required_storage_length Assign If Compare op:Gt Assign Call call:empty_strided Assign For Call call:zip For Call call:items Return return:yes" - }, - { - "library": "django", - "name": "getInnerText", - "source_code": "def getInnerText(node): inner_text = [] for child in node.childNodes: if child.nodeType = = child.TEXT_NODE or child.nodeType = = child.CDATA_SECTION_NODE: inner_text.append(child.data) elif child.nodeType = = child.ELEMENT_NODE: inner_text.extend(getInnerText(child)) else: pass return ''.join(inner_text)", - "docstring": "Get all the inner text of a DOM node (recursively).", - "type": "function", - "file_path": "django\\django\\core\\serializers\\xml_serializer.py", - "ast_data": "FunctionDef name:getInnerText arguments arg:node Assign For If BoolOp Compare op:Eq Compare op:Eq If Compare op:Eq Return return:yes" - }, - { - "library": "tensorflow", - "name": "RegisterPForWithArgs", - "source_code": "class RegisterPForWithArgs(RegisterPFor): def __init__(self, op_type, *args, **kw_args): super(RegisterPForWithArgs, self).__init__(op_type) self._args = args self._kw_args = kw_args def __call__(self, converter): def _f(pfor_input: _PforInput): return converter(pfor_input, self.op_type, *self._args, **self._kw_args) super(RegisterPForWithArgs, self).__call__(_f) return converter", - "docstring": "Utility to register converters for pfor. Usage: @RegisteRPFor(foo_op_type, foo=value, ....) def _foo_converter(pfor_input, foo=None, ....): ... See RegisterPFor for details on the conversion function. 
allows binding extra arguments to the conversion function at registration time.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", - "ast_data": "ClassDef name:RegisterPForWithArgs FunctionDef name:__init__ arguments arg:self arg:op_type vararg:args kwarg:kw_args Assign Assign FunctionDef name:__call__ arguments arg:self arg:converter FunctionDef name:_f arguments arg:pfor_input type:_PforInput Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_bwd_send_ops", - "source_code": "def get_bwd_send_ops(self, bwd_chunk_id: int) -> list[dist.P2POp]: self._check_chunk_id(bwd_chunk_id) if not self.has_backward or self.is_first: return [] if self.grad_send_info is None: self.grad_send_info = self._create_grad_send_info(self.args_recv_info[0]) ops: list[dist.P2POp] = [] grads_input = self.bwd_cache.pop(bwd_chunk_id) for grad, grad_recv_stage in zip(grads_input, self.grad_send_info): if isinstance(grad, torch.Tensor) and grad_recv_stage is not None: logger.debug('%s Sending gradient to Stage %s: %s', self.log_prefix, grad_recv_stage, grad.size()) peer_rank = self.stage_index_to_group_rank[grad_recv_stage] peer_global_rank = peer_rank if self.group is None else dist.get_global_rank(self.group, peer_rank) ops.append(dist.P2POp(dist.isend, grad, peer_global_rank, self.group)) elif not (grad is None and grad_recv_stage is None): raise RuntimeError(f'[{self.stage_index}] for chunk {bwd_chunk_id} has gradients {grad} and is expecting to send gradients to stage {grad_recv_stage}') return ops", - "docstring": "Get the gradient send ops for current stage's backward.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", - "ast_data": "FunctionDef name:get_bwd_send_ops arguments arg:self arg:bwd_chunk_id type:int If BoolOp Return return:yes If Compare op:Is Assign Call call:_create_grad_send_info Assign Call call:pop For Call call:zip If BoolOp Call call:isinstance Compare op:IsNot Assign Assign If Raise raises:RuntimeError(f'[{self.stage_index}] for chunk {bwd_chunk_id} has gradients {grad} and is expecting to send gradients to stage {grad_recv_stage}') Return return:yes" - }, - { - "library": "kornia", - "name": "fx", - "source_code": "@property def fx(self) -> Tensor: return self.rectified_left_camera[..., 0, 0]", - "docstring": "Return the focal length in the x-direction. Note that the focal lengths of the rectified left and right camera are assumed to be equal. Returns: tensor of shape :math:", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\camera\\stereo.py", - "ast_data": "FunctionDef name:fx arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, filename, dpi): _log.debug('Dvi: %s', filename) self.file = open(filename, 'rb') self.dpi = dpi self.fonts = {} self.state = _dvistate.pre self._missing_font = None", - "docstring": "Read the data from the file named *filename* and convert TeX's internal units to units of *dpi* per inch. *dpi* only sets the units and does not limit the resolution. 
Use None to return TeX's internal units.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:filename arg:dpi Assign Call call:open Assign Assign Assign Assign" - }, - { - "library": "seaborn", - "name": "add_gutters", - "source_code": "def add_gutters(self, points, center, trans_fwd, trans_inv): half_width = self.width / 2 low_gutter = trans_inv(trans_fwd(center) - half_width) off_low = points < low_gutter if off_low.any(): points[off_low] = low_gutter high_gutter = trans_inv(trans_fwd(center) + half_width) off_high = points > high_gutter if off_high.any(): points[off_high] = high_gutter gutter_prop = (off_high + off_low).sum() / len(points) if gutter_prop > self.warn_thresh: msg = '{: .1%} of the points cannot be placed; you may want to decrease the size of the markers or use stripplot.'.format(gutter_prop) warnings.warn(msg, UserWarning) return points", - "docstring": "Stop points from extending beyond their territory.", - "type": "method", - "file_path": "seaborn\\seaborn\\categorical.py", - "ast_data": "FunctionDef name:add_gutters arguments arg:self arg:points arg:center arg:trans_fwd arg:trans_inv Assign Assign Call call:trans_inv Assign Compare op:Lt If Call call:any Assign Assign Call call:trans_inv Assign Compare op:Gt If Call call:any Assign Assign If Compare op:Gt Assign Call call:format Return return:yes" - }, - { - "library": "numpy", - "name": "rindex", - "source_code": "def rindex(self, sub, start = 0, end = None): return rindex(self, sub, start, end)", - "docstring": "Like , but raises :exc: when the substring is not found. See Also -------- char.rindex", - "type": "method", - "file_path": "numpy\\numpy\\_core\\defchararray.py", - "ast_data": "FunctionDef name:rindex arguments arg:self arg:sub arg:start arg:end Return return:yes" - }, - { - "library": "tensorflow", - "name": "relayout", - "source_code": "@tf_export('experimental.dtensor.relayout', v1 = []) def relayout(tensor: tensor_lib.Tensor, layout: layout_lib.Layout, name: Optional[str] = None) -> tensor_lib.Tensor: layout_str = layout.to_string() with default_mesh(layout.mesh): return gen_dtensor_ops.relayout(tensor, layout_str, name = name)", - "docstring": "Changes the layout of . Changes the layout of to . This is used to fine-tune the behavior of ops following/connected to , such as choosing one SPMD expansion pattern over another. This works by forward propagating to connected TensorFlow computation graphs during layout propagation. Currently, only converting layouts from replicated to sharded or sharded to replicated per mesh dimension is supported. That is, \"x, y\" -> \"unsharded, y\" is supported, while \"x, y\" -> \"z, y\" is not supported. We also support a special \"match\" sharding spec, which instructs the relayout to act as an identity operation with respect to any sharding on these mesh dimensions. Relayout is internally lowered to a set of Split and/or AllToAll ops. When tensor layouts are converted from replicated to sharded, the cost is comparatively low because we only insert Split ops and no cross-device communication is needed. However, when tensor layouts are converted from sharded to replicated, cross-device communication may occur, causing potential performance impact. Args: tensor: A DTensor to specify a new layout for. layout: A Layout object specifying a new sharding spec. name: name of the Op. 
Returns: A DTensor output from the Relayout op.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py", - "ast_data": "FunctionDef name:relayout arguments arg:tensor type:tensor_lib.Tensor arg:layout type:layout_lib.Layout arg:name type:Optional[str] Call call:tf_export Assign Call call:to_string With Return return:yes" - }, - { - "library": "scikit-learn", - "name": "TargetTags", - "source_code": "@dataclass(slots = True) class TargetTags: required: bool one_d_labels: bool = False two_d_labels: bool = False positive_only: bool = False multi_output: bool = False single_output: bool = True", - "docstring": "Tags for the target data. Parameters ---------- required : bool Whether the estimator requires y to be passed to , or methods. The tag is `~sklearn.base.RegressorMixin~sklearn.base.ClassifierMixinmulti-output` if the estimator supports only multi-output cases.", - "type": "class", - "file_path": "scikit-learn\\sklearn\\utils\\_tags.py", - "ast_data": "ClassDef name:TargetTags Call call:dataclass" - }, - { - "library": "pytorch", - "name": "get_comm_latency_between", - "source_code": "def get_comm_latency_between(parent_partition: Partition, child_partition: Partition, transfer_rate_bytes_per_sec: float): if parent_partition.logical_device_ids ! = [] and child_partition.logical_device_ids ! = [] and (parent_partition.logical_device_ids = = child_partition.logical_device_ids): return 0.0 comm_size = 0 visited_nodes = set() for node in child_partition.nodes: input_nodes: dict[Node, None] = {} map_arg(node.args, input_nodes.setdefault) map_arg(node.kwargs, input_nodes.setdefault) for n in input_nodes: if n in parent_partition.nodes and n not in visited_nodes: size_bytes = getattr(n, 'size_bytes', None) if size_bytes is not None: comm_size + = size_bytes.output_size visited_nodes.add(n) return comm_size / transfer_rate_bytes_per_sec", - "docstring": "Given two partitions (parent and child), calculate the communication latency between the two.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\partitioner_utils.py", - "ast_data": "FunctionDef name:get_comm_latency_between arguments arg:parent_partition type:Partition arg:child_partition type:Partition arg:transfer_rate_bytes_per_sec type:float If BoolOp Compare op:NotEq Compare op:NotEq Compare op:Eq Return return:yes Assign Assign Call call:set For For If BoolOp Compare op:In Compare op:NotIn Assign Call call:getattr If Compare op:IsNot Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_size_inches", - "source_code": "def get_size_inches(self): return np.array(self.bbox_inches.p1)", - "docstring": "Return the current size of the figure in inches. Returns ------- ndarray The size (width, height) of the figure in inches. 
See Also -------- matplotlib.figure.Figure.set_size_inches matplotlib.figure.Figure.get_figwidth matplotlib.figure.Figure.get_figheight Notes ----- The size in pixels can be obtained by multiplying with .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:get_size_inches arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "execute_sql_flush", - "source_code": "def execute_sql_flush(self, sql_list): with transaction.atomic(using = self.connection.alias, savepoint = self.connection.features.can_rollback_ddl): with self.connection.cursor() as cursor: for sql in sql_list: cursor.execute(sql)", - "docstring": "Execute a list of SQL statements to flush the database.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:execute_sql_flush arguments arg:self arg:sql_list With With For" - }, - { - "library": "numpy", - "name": "cc_normalize_flags", - "source_code": "def cc_normalize_flags(self, flags): assert isinstance(flags, list) if self.cc_is_gcc or self.cc_is_clang or self.cc_is_icc: return self._cc_normalize_unix(flags) if self.cc_is_msvc or self.cc_is_iccw: return self._cc_normalize_win(flags) return flags", - "docstring": "Remove the conflicts that caused due gathering implied features flags. Parameters ---------- 'flags' list, compiler flags flags should be sorted from the lowest to the highest interest. Returns ------- list, filtered from any conflicts. Examples -------- >>> self.cc_normalize_flags(['-march=armv8.2-a+fp16', '-march=armv8.2-a+dotprod']) ['armv8.2-a+fp16+dotprod'] >>> self.cc_normalize_flags( ['-msse', '-msse2', '-msse3', '-mssse3', '-msse4.1', '-msse4.2', '-mavx', '-march=core-avx2'] ) ['-march=core-avx2']", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", - "ast_data": "FunctionDef name:cc_normalize_flags arguments arg:self arg:flags If BoolOp Return return:yes If BoolOp Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "freeze", - "source_code": "def freeze(d): if isinstance(d, dict): return frozenset(map(freeze, d.items())) if isinstance(d, set): return frozenset(map(freeze, d)) if isinstance(d, (tuple, list)): return tuple(map(freeze, d)) return d", - "docstring": "Freeze container to hashable form >>> freeze(1) 1 >>> freeze([1, 2]) (1, 2) >>> freeze({1: 2}) # doctest: +SKIP frozenset([(1, 2)])", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\utils.py", - "ast_data": "FunctionDef name:freeze arguments arg:d If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "set_local_bwd_input", - "source_code": "def set_local_bwd_input(self, next_stage_bwd_outputs: tuple[Optional[torch.Tensor], ...], mb_index: int) -> None: assert isinstance(next_stage_bwd_outputs, tuple), f'Expected tuple, got {type(next_stage_bwd_outputs)}' assert self.has_backward, \"can't set bwd input if this stage doesn't have backward\" assert not self.is_last, \"can't set bwd input if this stage is last\" recv_infos = self.grad_recv_info[mb_index] for info, tensor in zip(recv_infos, next_stage_bwd_outputs): assert isinstance(tensor, torch.Tensor), f'expected tensor values as outputs from prev stage, got {type(tensor)}' assert isinstance(info, _RecvInfo), f'Expected a recv info, got {type(info)}' info.buffer = tensor", - 
"docstring": "Moves 'grad input' tensors from the next stage to 'grad_output' on this stage, avoiding a copy or send/recv. Does not detach or set '_requires_grad'.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", - "ast_data": "FunctionDef name:set_local_bwd_input arguments arg:self arg:next_stage_bwd_outputs type:tuple[Optional[torch.Tensor], ...] arg:mb_index type:int Assign For Call call:zip Assign" - }, - { - "library": "tensorflow", - "name": "get_collection", - "source_code": "@tf_export(v1 = ['get_collection']) def get_collection(key, scope = None) -> list[Any]: return get_default_graph().get_collection(key, scope)", - "docstring": "Wrapper for using the default graph. See for more details. Args: key: The key for the collection. For example, the class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose attribute matches using . Items without a attribute are never returned if a scope is supplied and the choice or means that a without special tokens filters by prefix. Returns: The list of values in the collection with the given , or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:get_collection arguments arg:key arg:scope Call call:tf_export Return return:yes" - }, - { - "library": "kornia", - "name": "random_intrinsics", - "source_code": "def random_intrinsics(low: Union[float, Tensor], high: Union[float, Tensor]) -> Tensor: sampler = torch.distributions.Uniform(low, high) fx, fy, cx, cy = (sampler.sample(torch.Size((1,))) for _ in range(4)) zeros, ones = (zeros_like(fx), ones_like(fx)) camera_matrix = concatenate([fx, zeros, cx, zeros, fy, cy, zeros, zeros, ones]) return camera_matrix.view(1, 3, 3)", - "docstring": "Generate a random camera matrix based on a given uniform distribution. Args: low: lower range (inclusive). high: upper range (exclusive). Returns: the random camera matrix with the shape of :math:.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\epipolar\\projection.py", - "ast_data": "FunctionDef name:random_intrinsics arguments arg:low type:Union[float, Tensor] arg:high type:Union[float, Tensor] Assign Call call:Uniform Assign Assign Assign Call call:concatenate Return return:yes" - }, - { - "library": "scikit-learn", - "name": "BaseLink", - "source_code": "class BaseLink(ABC): is_multiclass = False interval_y_pred = Interval(-np.inf, np.inf, False, False) @abstractmethod def link(self, y_pred, out = None): pass @abstractmethod def inverse(self, raw_prediction, out = None): pass", - "docstring": "Abstract base class for differentiable, invertible link functions. Convention: - link function g: raw_prediction = g(y_pred) - inverse link h: y_pred = h(raw_prediction) For (generalized) linear models, is the so called linear predictor, and is the predicted conditional (on X) expected value of the target . 
The methods are not implemented as staticmethods in case a link function needs parameters.", - "type": "class", - "file_path": "scikit-learn\\sklearn\\_loss\\link.py", - "ast_data": "ClassDef name:BaseLink Assign Assign Call call:Interval FunctionDef name:link arguments arg:self arg:y_pred arg:out FunctionDef name:inverse arguments arg:self arg:raw_prediction arg:out" - }, - { - "library": "flexx", - "name": "appdata_dir", - "source_code": "def appdata_dir(appname = None, roaming = False): userDir = os.path.expanduser('~') path = None if sys.platform.startswith('win'): path1, path2 = (os.getenv('LOCALAPPDATA'), os.getenv('APPDATA')) path = path2 or path1 if roaming else path1 or path2 elif sys.platform.startswith('darwin'): path = os.path.join(userDir, 'Library', 'Application Support') if not (path and os.path.isdir(path)): path = os.environ.get('XDG_CONFIG_HOME', os.path.expanduser(os.path.join('~', '.config'))) prefix = sys.prefix if getattr(sys, 'frozen', None): prefix = os.path.abspath(os.path.dirname(sys.executable)) for reldir in ('settings', '../settings'): localpath = os.path.abspath(os.path.join(prefix, reldir)) if os.path.isdir(localpath): try: open(os.path.join(localpath, 'test.write'), 'wb').close() os.remove(os.path.join(localpath, 'test.write')) except IOError: pass else: path = localpath break if appname: if path = = userDir: appname = '.' + appname.lstrip('.') path = os.path.join(path, appname) if not os.path.isdir(path): os.mkdir(path) return path", - "docstring": "Get the path to the application directory, where applications are allowed to write user specific files (e.g. configurations).", - "type": "function", - "file_path": "flexx\\flexx\\util\\config.py", - "ast_data": "FunctionDef name:appdata_dir arguments arg:appname arg:roaming Assign Call call:expanduser Assign If Call call:startswith Assign Assign If Call call:startswith Assign Call call:join If Assign Call call:get Assign If Call call:getattr Assign Call call:abspath For Assign Call call:abspath If Call call:isdir Try ExceptHandler Assign If If Compare op:Eq Assign Assign Call call:join If Return return:yes" - }, - { - "library": "matplotlib", - "name": "invalidate", - "source_code": "def invalidate(self): return self._invalidate_internal(level = self._INVALID_AFFINE_ONLY if self.is_affine else self._INVALID_FULL, invalidating_node = self)", - "docstring": "Invalidate this and triggers an invalidation of its ancestors. 
Should be called any time the transform changes.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:invalidate arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "read_var_header", - "source_code": "def read_var_header(self): mdtype, byte_count = self._file_reader.read_full_tag() if not byte_count > 0: raise ValueError('Did not read any bytes') next_pos = self.mat_stream.tell() + byte_count if mdtype = = miCOMPRESSED: stream = ZlibInputStream(self.mat_stream, byte_count) self._matrix_reader.set_stream(stream) check_stream_limit = self.verify_compressed_data_integrity mdtype, byte_count = self._matrix_reader.read_full_tag() else: check_stream_limit = False self._matrix_reader.set_stream(self.mat_stream) if not mdtype = = miMATRIX: raise TypeError(f'Expecting miMATRIX type here, got {mdtype}') header = self._matrix_reader.read_header(check_stream_limit) return (header, next_pos)", - "docstring": "Read header, return header, next position Header has to define at least .name and .is_global Parameters ---------- None Returns ------- header : object object that can be passed to self.read_var_array, and that has attributes .name and .is_global next_position : int position in stream of next variable", - "type": "method", - "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", - "ast_data": "FunctionDef name:read_var_header arguments arg:self Assign Call call:read_full_tag If Raise raises:ValueError('Did not read any bytes') Assign If Compare op:Eq Assign Call call:ZlibInputStream Assign Assign Call call:read_full_tag Assign If Raise raises:TypeError(f'Expecting miMATRIX type here, got {mdtype}') Assign Call call:read_header Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_on_write_restore_ops", - "source_code": "def get_on_write_restore_ops(var, tensor): packed_var = var._packed_variable if packed_var is not None: return control_flow_ops.group(tuple((assign_on_device(d, packed_var, tensor) for d in packed_var.devices))) return control_flow_ops.group(tuple((assign_on_device(v.device, v, tensor) for v in var.values)))", - "docstring": "Return restore ops for AUTO and ON_WRITE variables.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py", - "ast_data": "FunctionDef name:get_on_write_restore_ops arguments arg:var arg:tensor Assign If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "axisinfo", - "source_code": "@staticmethod def axisinfo(unit, axis): return None", - "docstring": "Return an for the axis with the specified units.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\units.py", - "ast_data": "FunctionDef name:axisinfo arguments arg:unit arg:axis Return return:yes" - }, - { - "library": "pandas", - "name": "__len__", - "source_code": "def __len__(self) -> int: return len(self.index)", - "docstring": "Returns length of info axis, but here we use the index.", - "type": "method", - "file_path": "pandas\\pandas\\core\\frame.py", - "ast_data": "FunctionDef name:__len__ arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "code_with_constants", - "source_code": "@property def code_with_constants(self): r = self.forward.code_with_constants return (r[0], ConstMap(r[1]))", - "docstring": "Return a tuple. 
Returns a tuple of: [0] a pretty-printed representation (as valid Python syntax) of the internal graph for the `codeinspecting-code` for details.", - "type": "method", - "file_path": "pytorch\\torch\\jit\\_script.py", - "ast_data": "FunctionDef name:code_with_constants arguments arg:self Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_untyped_storages", - "source_code": "def get_untyped_storages(t: torch.Tensor) -> set[torch.UntypedStorage]: unflattened_tensors = [t] flattened_tensor_storages = set() while len(unflattened_tensors) > 0: obj = unflattened_tensors.pop() if is_traceable_wrapper_subclass(obj): attrs, _ = obj.__tensor_flatten__() unflattened_tensors.extend([getattr(obj, attr) for attr in attrs]) elif not hasattr(obj, 'untyped_storage'): warnings.warn(f'Expected a tensor or a traceable wrapper-subclass of tensor, but got {type(obj)}', category = UserWarning, stacklevel = 2) else: flattened_tensor_storages.add(obj.untyped_storage()) return flattened_tensor_storages", - "docstring": "Recursively extracts untyped storages from a tensor or its subclasses. Args: t (torch.Tensor): The tensor to extract storages from. Returns: Set[torch.UntypedStorage]: A set of untyped storages.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_tools\\common_utils.py", - "ast_data": "FunctionDef name:get_untyped_storages arguments arg:t type:torch.Tensor Assign Assign Call call:set While Compare op:Gt Assign Call call:pop If Call call:is_traceable_wrapper_subclass Assign Call call:__tensor_flatten__ If Return return:yes" - }, - { - "library": "tensorflow", - "name": "AlreadyExistsError", - "source_code": "@tf_export('errors.AlreadyExistsError') class AlreadyExistsError(OpError): def __init__(self, node_def, op, message, *args): super(AlreadyExistsError, self).__init__(node_def, op, message, ALREADY_EXISTS, *args)", - "docstring": "Raised when an entity that we attempted to create already exists. An API raises this error to avoid overwriting an existing resource, value, etc. Calling a creation API multiple times with the same arguments could raise this error if the creation API is not idempotent. For example, running an operation that saves a file (e.g.
) could potentially raise this exception if an explicit filename for an existing file was passed.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", - "ast_data": "ClassDef name:AlreadyExistsError Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:node_def arg:op arg:message vararg:args" - }, - { - "library": "pandas", - "name": "nodefault_used_not_only_for_typing", - "source_code": "def nodefault_used_not_only_for_typing(file_obj: IO[str]) -> Iterable[tuple[int, str]]: contents = file_obj.read() tree = ast.parse(contents) in_annotation = False nodes: list[tuple[bool, ast.AST]] = [(in_annotation, tree)] while nodes: in_annotation, node = nodes.pop() if not in_annotation and (isinstance(node, ast.Name) and node.id = = 'NoDefault' or (isinstance(node, ast.Attribute) and node.attr = = 'NoDefault')): yield (node.lineno, 'NoDefault is used not only for typing') for name in reversed(node._fields): value = getattr(node, name) if name in {'annotation', 'returns'}: next_in_annotation = True else: next_in_annotation = in_annotation if isinstance(value, ast.AST): nodes.append((next_in_annotation, value)) elif isinstance(value, list): nodes.extend(((next_in_annotation, value) for value in reversed(value) if isinstance(value, ast.AST)))", - "docstring": "Test case where pandas._libs.lib.NoDefault is not used for typing. Parameters ---------- file_obj : IO File-like object containing the Python code to validate. Yields ------ line_number : int Line number of misused lib.NoDefault. msg : str Explanation of the error.", - "type": "function", - "file_path": "pandas\\scripts\\validate_unwanted_patterns.py", - "ast_data": "FunctionDef name:nodefault_used_not_only_for_typing arguments arg:file_obj type:IO[str] Assign Call call:read Assign Call call:parse Assign While Assign Call call:pop If BoolOp BoolOp BoolOp Call call:isinstance Compare op:Eq BoolOp Call call:isinstance Compare op:Eq For Call call:reversed Assign Call call:getattr If Compare op:In Assign Assign If Call call:isinstance If Call call:isinstance" - }, - { - "library": "numpy", - "name": "sum", - "source_code": "def sum(self, axis = None, dtype = None, out = None): return N.ndarray.sum(self, axis, dtype, out, keepdims = True)._collapse(axis)", - "docstring": "Returns the sum of the matrix elements, along the given axis. Refer to for full documentation. See Also -------- numpy.sum Notes ----- This is the same as , except that where an would be returned, a object is returned instead. Examples -------- >>> x = np.matrix([[1, 2], [4, 3]]) >>> x.sum() 10 >>> x.sum(axis=1) matrix([[3], [7]]) >>> x.sum(axis=1, dtype='float') matrix([[3.], [7.]]) >>> out = np.zeros((2, 1), dtype='float') >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out)) matrix([[3.], [7.]])", - "type": "method", - "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", - "ast_data": "FunctionDef name:sum arguments arg:self arg:axis arg:dtype arg:out Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, *input_types, **kwargs): self._input_types = input_types self._func_name = kwargs.pop('func_name', None) self._grad_func = kwargs.pop('grad_func', None) self._python_grad_func = kwargs.pop('python_grad_func', None) self._out_names = kwargs.pop('out_names', None) self._extra_kwargs = kwargs", - "docstring": "Create a decorator. Args: *input_types: A list of **kwargs: Optional keyword arguments, including func_name - (optional). 
A python string, the name to use to declare this in the graph. grad_func - (optional). A function implementing the gradient of the function-to-register. This must be a object. The gradient function must satisfy the criterion defined in function.proto:GradientDef. python_grad_func - (optional). A function implementing the gradient of the function python-side. This function must take the current op and the gradients w.r.t. its outputs, and return the gradients w.r.t. the inputs. That is, it must implement the interface expected by ). This will be called by tf.gradients to add the gradient ops to the graph. At most one of grad_func and python_grad_func can be specified. out_names = (optional). A list of strings, one per output tensor. shape_func - (optional). A function taking the op and returning a list of static shapes to set for the function's outputs.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\function.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self vararg:input_types kwarg:kwargs Assign Assign Call call:pop Assign Call call:pop Assign Call call:pop Assign Call call:pop Assign" - }, - { - "library": "pytorch", - "name": "mesh", - "source_code": "def mesh(tag, vertices, colors, faces, config_dict, display_name = None, description = None): from tensorboard.plugins.mesh import metadata from tensorboard.plugins.mesh.plugin_data_pb2 import MeshPluginData json_config = _get_json_config(config_dict) summaries = [] tensors = [(vertices, MeshPluginData.VERTEX), (faces, MeshPluginData.FACE), (colors, MeshPluginData.COLOR)] tensors = [tensor for tensor in tensors if tensor[0] is not None] components = metadata.get_components_bitmask([content_type for tensor, content_type in tensors]) for tensor, content_type in tensors: summaries.append(_get_tensor_summary(tag, display_name, description, tensor, content_type, components, json_config)) return Summary(value = summaries)", - "docstring": "Output a merged protocol buffer with a mesh/point cloud. Args: tag: A name for this summary operation. vertices: Tensor of shape representing the 3D coordinates of vertices. faces: Tensor of shape containing indices of vertices within each triangle. colors: Tensor of shape containing colors for each vertex. display_name: If set, will be used as the display name in TensorBoard. Defaults to . description: A longform readable description of the summary data. Markdown is supported. config_dict: Dictionary with ThreeJS classes names and configuration.
Returns: Merged summary for mesh/point cloud representation.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py", - "ast_data": "FunctionDef name:mesh arguments arg:tag arg:vertices arg:colors arg:faces arg:config_dict arg:display_name arg:description Assign Call call:_get_json_config Assign Assign Assign Assign Call call:get_components_bitmask For Return return:yes" - }, - { - "library": "tensorflow", - "name": "extract_output_file_path", - "source_code": "def extract_output_file_path(args): if args and args[-1].endswith('>'): raise SyntaxError('Redirect file path is empty') elif args and args[-1].startswith('>'): try: _parse_interval(args[-1]) if len(args) > 1 and args[-2].startswith('-'): output_file_path = None else: output_file_path = args[-1][1:] args = args[: -1] except ValueError: output_file_path = args[-1][1:] args = args[: -1] elif len(args) > 1 and args[-2] = = '>': output_file_path = args[-1] args = args[: -2] elif args and args[-1].count('>') = = 1: gt_index = args[-1].index('>') if gt_index > 0 and args[-1][gt_index - 1] = = ' = ': output_file_path = None else: output_file_path = args[-1][gt_index + 1:] args[-1] = args[-1][: gt_index] elif len(args) > 1 and args[-2].endswith('>'): output_file_path = args[-1] args = args[: -1] args[-1] = args[-1][: -1] else: output_file_path = None return (args, output_file_path)", - "docstring": "Extract output file path from command arguments. Args: args: (list of str) command arguments. Returns: (list of str) Command arguments with the output file path part stripped. (str or None) Output file path (if any). Raises: SyntaxError: If there is no file path after the last \">\" character.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\command_parser.py", - "ast_data": "FunctionDef name:extract_output_file_path arguments arg:args If BoolOp Call call:endswith Raise raises:SyntaxError('Redirect file path is empty') If BoolOp Call call:startswith Try If BoolOp Compare op:Gt Call call:startswith Assign Assign Assign ExceptHandler Assign Assign If BoolOp Compare op:Gt Compare op:Eq Assign Assign If BoolOp Compare op:Eq Assign Call call:index If BoolOp Compare op:Gt Compare op:Eq Assign Assign Assign If BoolOp Compare op:Gt Call call:endswith Assign Assign Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "get_initial_for_field", - "source_code": "def get_initial_for_field(self, field, field_name): value = self.initial.get(field_name, field.initial) if callable(value): value = value() if isinstance(value, (datetime.datetime, datetime.time)) and (not field.widget.supports_microseconds): value = value.replace(microsecond = 0) return value", - "docstring": "Return initial data for field on form. Use initial data from the form or the field, in that order. Evaluate callable values.", - "type": "method", - "file_path": "django\\django\\forms\\forms.py", - "ast_data": "FunctionDef name:get_initial_for_field arguments arg:self arg:field arg:field_name Assign Call call:get If Call call:callable Assign Call call:value If BoolOp Call call:isinstance Assign Call call:replace Return return:yes" - }, - { - "library": "django", - "name": "is_multipart", - "source_code": "def is_multipart(self): if self.forms: return self.forms[0].is_multipart() else: return self.empty_form.is_multipart()", - "docstring": "Return True if the formset needs to be multipart, i.e. 
it has FileInput, or False otherwise.", - "type": "method", - "file_path": "django\\django\\forms\\formsets.py", - "ast_data": "FunctionDef name:is_multipart arguments arg:self If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "IntArrayRef_summary", - "source_code": "def IntArrayRef_summary(valobj: Any, internal_dict: Any, options: Any) -> str: with DisableBreakpoints(): target = get_target() tensor = valobj.GetName() result = target.EvaluateExpression(f'torch: : gdb: : int_array_ref_string({tensor})') str_result = str(result) str_result = str_result[str_result.find('\"') + 1: -1] return str_result", - "docstring": "Print human readable representation of c10::IntArrayRef", - "type": "function", - "file_path": "pytorch\\tools\\lldb\\pytorch_lldb.py", - "ast_data": "FunctionDef name:IntArrayRef_summary arguments arg:valobj type:Any arg:internal_dict type:Any arg:options type:Any With Assign Call call:get_target Assign Call call:GetName Assign Call call:EvaluateExpression Assign Call call:str Assign Return return:yes" - }, - { - "library": "authlib", - "name": "validate_id_token_encryption_enc_values_supported", - "source_code": "def validate_id_token_encryption_enc_values_supported(self): validate_array_value(self, 'id_token_encryption_enc_values_supported')", - "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) supported by the OP for the ID Token to encode the Claims in a JWT.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", - "ast_data": "FunctionDef name:validate_id_token_encryption_enc_values_supported arguments arg:self" - }, - { - "library": "tensorflow", - "name": "state_size", - "source_code": "@property def state_size(self): raise NotImplementedError('Abstract method')", - "docstring": "size(s) of state(s) used by this cell. 
It can be represented by an Integer, a TensorShape or a tuple of Integers or TensorShapes.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\recurrent.py", - "ast_data": "FunctionDef name:state_size arguments arg:self Raise raises:NotImplementedError('Abstract method')" - }, - { - "library": "salmon", - "name": "__str__", - "source_code": "def __str__(self): return encoding.to_string(self.base)", - "docstring": "Converts this to a string usable for storage into a queue or transmission.", - "type": "method", - "file_path": "salmon\\salmon\\mail.py", - "ast_data": "FunctionDef name:__str__ arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "cluster_spec", - "source_code": "def cluster_spec(self): return self._cluster_spec", - "docstring": "Returns the ClusterSpec passed into the constructor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", - "ast_data": "FunctionDef name:cluster_spec arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "encode_resource_handle", - "source_code": "def encode_resource_handle(resource_handle): return numpy_compat.np_asarray(bytearray(resource_handle.SerializeToString()), dtype = dtypes.np_resource)", - "docstring": "Encode a ResourceHandle proto as custom numpy struct type.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\session_ops.py", - "ast_data": "FunctionDef name:encode_resource_handle arguments arg:resource_handle Return return:yes" - }, - { - "library": "tensorflow", - "name": "save", - "source_code": "def save(self, save_path, options = None): save_start_time = time.time() if not self._initialized: self._ensure_initialized() else: self._queue.join() self._copy_to_cpu() self._check_async_thread_error() save_counter = self.checkpointer().save_counter.numpy() + 1 full_path = '{}-{}'.format(save_path, save_counter) context.async_wait() self._save_file_prefix = save_path self._use_checkpoint_save = True self._checkpoint_options = copy.copy(options) if options else None if self._checkpoint_options: self._checkpoint_options.experimental_enable_async_checkpoint = False self._queue.put(True) save_end_time = time.time() metrics.AddCheckpointWriteDuration(api_label = _ASYNC_CHECKPOINT, microseconds = _get_duration_microseconds(save_start_time, save_end_time)) return full_path", - "docstring": "Save the checkpointed variables. Args: save_path: The file prefix of the checkpoint file. options: Optional CheckpointOption instance. Returns: The full path of the checkpoint file.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\async_checkpoint_helper.py", - "ast_data": "FunctionDef name:save arguments arg:self arg:save_path arg:options Assign Call call:time If Assign Assign Call call:format Assign Assign Assign If Assign Assign Call call:time Return return:yes" - }, - { - "library": "django", - "name": "test_capability", - "source_code": "def test_capability(self, capability): return bool(capi.test_capability(self.ptr, force_bytes(capability)))", - "docstring": "Return a bool indicating whether this Layer supports the given capability (a string).
Valid capability strings include: 'RandomRead', 'SequentialWrite', 'RandomWrite', 'FastSpatialFilter', 'FastFeatureCount', 'FastGetExtent', 'CreateField', 'Transactions', 'DeleteFeature', and 'FastSetNextByIndex'.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py", - "ast_data": "FunctionDef name:test_capability arguments arg:self arg:capability Return return:yes" - }, - { - "library": "django", - "name": "construct_relative_path", - "source_code": "def construct_relative_path(current_template_name, relative_name, allow_recursion = False): new_name = relative_name.strip('\\'\"') if not new_name.startswith(('./', '../')): return relative_name if current_template_name is None: raise TemplateSyntaxError(f'The relative path {relative_name} cannot be evaluated due to an unknown template origin.') new_name = posixpath.normpath(posixpath.join(posixpath.dirname(current_template_name.lstrip('/')), new_name)) if new_name.startswith('../'): raise TemplateSyntaxError(\"The relative path '%s' points outside the file hierarchy that template '%s' is in.\" % (relative_name, current_template_name)) if not allow_recursion and current_template_name.lstrip('/') = = new_name: raise TemplateSyntaxError(\"The relative path '%s' was translated to template name '%s', the same template in which the tag appears.\" % (relative_name, current_template_name)) has_quotes = relative_name.startswith(('\"', \"'\")) and relative_name[0] = = relative_name[-1] return f'\"{new_name}\"' if has_quotes else new_name", - "docstring": "Convert a relative path (starting with './' or '../') to the full template name based on the current_template_name.", - "type": "function", - "file_path": "django\\django\\template\\loader_tags.py", - "ast_data": "FunctionDef name:construct_relative_path arguments arg:current_template_name arg:relative_name arg:allow_recursion Assign Call call:strip If Return return:yes If Compare op:Is Raise raises:TemplateSyntaxError(f'The relative path {relative_name} cannot be evaluated due to an unknown template origin.') Assign Call call:normpath If Call call:startswith Raise raises:TemplateSyntaxError(\"The relative path '%s' points outside the file hierarchy that template '%s' is in.\" % (relative_name, current_template_name)) If BoolOp Compare op:Eq Raise raises:TemplateSyntaxError(\"The relative path '%s' was translated to template name '%s', the same template in which the tag appears.\" % (relative_name, current_template_name)) Assign BoolOp Call call:startswith Compare op:Eq Return return:yes" - }, - { - "library": "django", - "name": "int64_output", - "source_code": "def int64_output(func, argtypes): func.argtypes = argtypes func.restype = c_int64 return func", - "docstring": "Generate a ctypes function that returns a 64-bit integer value.", - "type": "function", - "file_path": "django\\django\\contrib\\gis\\gdal\\prototypes\\generation.py", - "ast_data": "FunctionDef name:int64_output arguments arg:func arg:argtypes Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "set_non_traceable_module_classes", - "source_code": "def set_non_traceable_module_classes(self, module_classes: list[type]) -> PrepareCustomConfig: self.non_traceable_module_classes = module_classes return self", - "docstring": "Set the modules that are not symbolically traceable, identified by class.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", - "ast_data": "FunctionDef name:set_non_traceable_module_classes arguments arg:self 
arg:module_classes type:list[type] Assign Return return:yes" - }, - { - "library": "algorithms", - "name": "pythagoras", - "source_code": "def pythagoras(opposite, adjacent, hypotenuse): try: if opposite = = str('?'): return 'Opposite = ' + str((hypotenuse ** 2 - adjacent ** 2) ** 0.5) if adjacent = = str('?'): return 'Adjacent = ' + str((hypotenuse ** 2 - opposite ** 2) ** 0.5) if hypotenuse = = str('?'): return 'Hypotenuse = ' + str((opposite ** 2 + adjacent ** 2) ** 0.5) return 'You already know the answer!' except: raise ValueError('invalid argument(s) were given.')", - "docstring": "Returns length of a third side of a right angled triangle. Passing \"?\" will indicate the unknown side.", - "type": "function", - "file_path": "algorithms\\algorithms\\maths\\pythagoras.py", - "ast_data": "FunctionDef name:pythagoras arguments arg:opposite arg:adjacent arg:hypotenuse Try If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Return return:yes ExceptHandler Raise raises:ValueError('invalid argument(s) were given.')" - }, - { - "library": "pytorch", - "name": "get_current_device_index", - "source_code": "def get_current_device_index() -> int: if torch.cuda.device_count() > 0: return torch.cuda.current_device() return -1", - "docstring": "Checks if there are CUDA devices available and returns the device index of the current default CUDA device. Returns -1 in case there are no CUDA devices available. Arguments: ``", - "type": "function", - "file_path": "pytorch\\torch\\_utils.py", - "ast_data": "FunctionDef name:get_current_device_index arguments If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "cherrypy", - "name": "check_config_types", - "source_code": "def check_config_types(self): self._known_types(cherrypy.config) for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue self._known_types(app.config)", - "docstring": "Assert that config values are of the same type as default values.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpchecker.py", - "ast_data": "FunctionDef name:check_config_types arguments arg:self For Call call:items If" - }, - { - "library": "tensorflow", - "name": "get_legacy_output_types", - "source_code": "@tf_export(v1 = ['data.get_output_types']) def get_legacy_output_types(dataset_or_iterator): return nest.map_structure(lambda component_spec: component_spec._to_legacy_output_types(), get_structure(dataset_or_iterator))", - "docstring": "Returns the output shapes for elements of the input dataset / iterator. Args: dataset_or_iterator: A or . Returns: A (nested) structure of objects matching the structure of dataset / iterator elements and specifying the shape of the individual components. @compatibility(TF2) This is a legacy API for inspecting the type signature of dataset elements. In TF 2, you should use the attribute instead. 
@end_compatibility", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", - "ast_data": "FunctionDef name:get_legacy_output_types arguments arg:dataset_or_iterator Call call:tf_export Return return:yes" - }, - { - "library": "django", - "name": "timeuntil", - "source_code": "def timeuntil(d, now = None, time_strings = None, depth = 2): return timesince(d, now, reversed = True, time_strings = time_strings, depth = depth)", - "docstring": "Like timesince, but return a string measuring the time until the given time.", - "type": "function", - "file_path": "django\\django\\utils\\timesince.py", - "ast_data": "FunctionDef name:timeuntil arguments arg:d arg:now arg:time_strings arg:depth Return return:yes" - }, - { - "library": "scipy", - "name": "get_site_packages", - "source_code": "def get_site_packages(self): if sys.version_info > = (3, 12): plat_path = Path(sysconfig.get_path('platlib')) elif 'deb_system' in sysconfig.get_scheme_names(): plat_path = Path(sysconfig.get_path('platlib', 'deb_system')) else: plat_path = Path(sysconfig.get_path('platlib')) return self.installed / plat_path.relative_to(sys.exec_prefix)", - "docstring": "Depending on whether we have debian python or not, return dist_packages path or site_packages path.", - "type": "method", - "file_path": "scipy\\dev.py", - "ast_data": "FunctionDef name:get_site_packages arguments arg:self If Compare op:GtE Assign Call call:Path If Compare op:In Assign Call call:Path Assign Call call:Path Return return:yes" - }, - { - "library": "pytorch", - "name": "log_abs_det_jacobian", - "source_code": "def log_abs_det_jacobian(self, x, y): raise NotImplementedError", - "docstring": "Computes the log det jacobian given input and output.", - "type": "method", - "file_path": "pytorch\\torch\\distributions\\transforms.py", - "ast_data": "FunctionDef name:log_abs_det_jacobian arguments arg:self arg:x arg:y Raise raises:NotImplementedError" - }, - { - "library": "pytorch", - "name": "compute_values", - "source_code": "def compute_values(self, value_names: Sequence[str], args = (), kwargs = None) -> Sequence[torch.Tensor]: if kwargs is None: kwargs = {} self.release() values = _create_value_mapping(self.model.graph) for name in value_names: if name not in values: raise ValueError(f\"Value '{name}' not found in the model. Please provide a valid value name.\") temporary_outputs = [values[name] for name in value_names] with _set_graph_outputs(self.model.graph, temporary_outputs): try: result = self(*args, **kwargs) finally: self.release() return result", - "docstring": "Compute the values of the specified names in the ONNX model. This method is used to compute the values of the specified names in the ONNX model. The values are returned as a dictionary mapping names to tensors. Args: value_names: The names of the values to compute. Returns: A dictionary mapping names to tensors.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py", - "ast_data": "FunctionDef name:compute_values arguments arg:self arg:value_names type:Sequence[str] arg:args arg:kwargs If Compare op:Is Assign Assign Call call:_create_value_mapping For If Compare op:NotIn Raise raises:ValueError(f\"Value '{name}' not found in the model. 
Please provide a valid value name.\") Assign With Try Assign Call call:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "stairs", - "source_code": "@_preprocess_data() def stairs(self, values, edges = None, *, orientation = 'vertical', baseline = 0, fill = False, **kwargs): if 'color' in kwargs: _color = kwargs.pop('color') else: _color = self._get_lines.get_next_color() if fill: kwargs.setdefault('linewidth', 0) kwargs.setdefault('facecolor', _color) else: kwargs.setdefault('edgecolor', _color) if edges is None: edges = np.arange(len(values) + 1) edges, values, baseline = self._process_unit_info([('x', edges), ('y', values), ('y', baseline)], kwargs) patch = mpatches.StepPatch(values, edges, baseline = baseline, orientation = orientation, fill = fill, **kwargs) self.add_patch(patch) if baseline is None and fill: _api.warn_external(f'Both baseline = {baseline!r} and fill = {fill!r} have been passed. baseline = None is only intended for unfilled stair plots. Because baseline is None, the Path used to draw the stairs will not be closed, thus because fill is True the polygon will be closed by drawing an (unstroked) edge from the first to last point. It is very likely that the resulting fill patterns is not the desired result.') if baseline is not None: if orientation = = 'vertical': patch.sticky_edges.y.append(np.min(baseline)) self.update_datalim([(edges[0], np.min(baseline))]) else: patch.sticky_edges.x.append(np.min(baseline)) self.update_datalim([(np.min(baseline), edges[0])]) self._request_autoscale_view() return patch", - "docstring": "Draw a stepwise constant function as a line or a filled plot. *edges* define the x-axis positions of the steps. *values* the function values between these steps. Depending on *fill*, the function is drawn either as a continuous line with vertical segments at the edges, or as a filled area. Parameters ---------- values : array-like The step heights. edges : array-like The step positions, with ` and `~matplotlib.patches.StepPatch~matplotlib.patches.StepPatch` properties", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", - "ast_data": "FunctionDef name:stairs arguments arg:self arg:values arg:edges kwarg:kwargs Call call:_preprocess_data If Compare op:In Assign Call call:pop Assign Call call:get_next_color If If Compare op:Is Assign Call call:arange Assign Call call:_process_unit_info Assign Call call:StepPatch If BoolOp Compare op:Is If Compare op:IsNot If Compare op:Eq Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_backend", - "source_code": "def get_backend(*, auto_select = True): if auto_select: return rcParams['backend'] else: backend = rcParams._get('backend') if backend is rcsetup._auto_backend_sentinel: return None else: return backend", - "docstring": "Return the name of the current backend. Parameters ---------- auto_select : bool, default: True Whether to trigger backend resolution if no backend has been selected so far. If True, this ensures that a valid backend is returned. If False, this returns None if no backend has been selected so far. .. versionadded:: 3.10 .. admonition:: Provisional The *auto_select* flag is provisional. It may be changed or removed without prior warning. 
See Also -------- matplotlib.use", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", - "ast_data": "FunctionDef name:get_backend arguments If Return return:yes Assign Call call:_get If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "feincms", - "name": "run_request_processors", - "source_code": "def run_request_processors(self): if not getattr(self.object, 'request_processors', None): return for fn in reversed(list(self.object.request_processors.values())): r = fn(self.object, self.request) if r: return r", - "docstring": "Before rendering an object, run all registered request processors. A request processor may peruse and modify the page or the request. It can also return a `` for shortcutting the rendering and returning that response immediately to the client.", - "type": "method", - "file_path": "feincms\\feincms\\module\\mixins.py", - "ast_data": "FunctionDef name:run_request_processors arguments arg:self If Return return:no For Call call:reversed Assign Call call:fn If Return return:yes" - }, - { - "library": "pytorch", - "name": "get_unique_name_wrt", - "source_code": "def get_unique_name_wrt(prefix: str, *containers, requires_suffix = False) -> str: if not requires_suffix and (not is_in(prefix, *containers)): return prefix for i in itertools.count(): candidate = f'{prefix}_{i}' if not is_in(candidate, *containers): return candidate raise AssertionError('unreachable')", - "docstring": "Return a name that starts with and is not in any of the (e.g., map, set).", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\utils.py", - "ast_data": "FunctionDef name:get_unique_name_wrt arguments arg:prefix type:str vararg:containers If BoolOp Return return:yes For Call call:count Assign If Return return:yes Raise raises:AssertionError('unreachable')" - }, - { - "library": "pytorch", - "name": "validate_rearrange_expressions", - "source_code": "def validate_rearrange_expressions(left: ParsedExpression, right: ParsedExpression, axes_lengths: Mapping[str, int]) -> None: for length in axes_lengths.values(): if (length_type: = type(length)) is not int: raise TypeError(f'rearrange axis lengths must be integers, got: {length_type}') if left.has_non_unitary_anonymous_axes or right.has_non_unitary_anonymous_axes: raise ValueError('rearrange only supports unnamed axes of size 1') difference = set.symmetric_difference(left.identifiers, right.identifiers) if len(difference) > 0: raise ValueError(f'Identifiers only on one side of rearrange expression (should be on both): {difference}') unmatched_axes = axes_lengths.keys() - left.identifiers if len(unmatched_axes) > 0: raise ValueError(f'Identifiers not found in rearrange expression: {unmatched_axes}')", - "docstring": "Perform expression validations that are specific to the operation. 
Args: left (ParsedExpression): left-hand side expression right (ParsedExpression): right-hand side expression axes_lengths (Mapping[str, int]): any additional length specifications for dimensions", - "type": "function", - "file_path": "pytorch\\functorch\\einops\\_parsing.py", - "ast_data": "FunctionDef name:validate_rearrange_expressions arguments arg:left type:ParsedExpression arg:right type:ParsedExpression arg:axes_lengths type:Mapping[str, int] For Call call:values If Compare op:IsNot Raise raises:TypeError(f'rearrange axis lengths must be integers, got: {length_type}') If BoolOp Raise raises:ValueError('rearrange only supports unnamed axes of size 1') Assign Call call:symmetric_difference If Compare op:Gt Raise raises:ValueError(f'Identifiers only on one side of rearrange expression (should be on both): {difference}') Assign If Compare op:Gt Raise raises:ValueError(f'Identifiers not found in rearrange expression: {unmatched_axes}')" - }, - { - "library": "matplotlib", - "name": "set_ylim", - "source_code": "def set_ylim(self, bottom = None, top = None, *, emit = True, auto = False, ymin = None, ymax = None): if top is None and np.iterable(bottom): bottom, top = bottom if ymin is not None: if bottom is not None: raise TypeError(\"Cannot pass both 'bottom' and 'ymin'\") bottom = ymin if ymax is not None: if top is not None: raise TypeError(\"Cannot pass both 'top' and 'ymax'\") top = ymax return self.yaxis._set_lim(bottom, top, emit = emit, auto = auto)", - "docstring": "Set the y-axis view limits. Parameters ---------- bottom : float, optional The bottom ylim in data coordinates. Passing *None* leaves the limit unchanged. The bottom and top ylims may also be passed as the tuple (*bottom*, *top*) as the first positional argument (or as the *bottom* keyword argument). .. ACCEPTS: (bottom: float, top: float) top : float, optional The top ylim in data coordinates. Passing *None* leaves the limit unchanged. emit : bool, default: True Whether to notify observers of limit change. auto : bool or None, default: False Whether to turn on autoscaling of the y-axis. *True* turns on, *False* turns off, *None* leaves unchanged. ymin, ymax : float, optional They are equivalent to bottom and top respectively, and it is an error to pass both *ymin* and *bottom* or *ymax* and *top*. Returns ------- bottom, top : (float, float) The new y-axis limits in data coordinates. See Also -------- get_ylim set_ybound, get_ybound invert_yaxis, yaxis_inverted Notes ----- The *bottom* value may be greater than the *top* value, in which case the y-axis values will decrease from *bottom* to *top*. Examples -------- >>> set_ylim(bottom, top) >>> set_ylim((bottom, top)) >>> bottom, top = set_ylim(bottom, top) One limit may be left unchanged. >>> set_ylim(top=top_lim) Limits may be passed in reverse order to flip the direction of the y-axis. For example, suppose `` represents depth of the ocean in m. The y-axis limits might be set like the following so 5000 m depth is at the bottom of the plot and the surface, 0 m, is at the top. 
>>> set_ylim(5000, 0)", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:set_ylim arguments arg:self arg:bottom arg:top If BoolOp Compare op:Is Call call:iterable Assign If Compare op:IsNot If Compare op:IsNot Raise raises:TypeError(\"Cannot pass both 'bottom' and 'ymin'\") Assign If Compare op:IsNot If Compare op:IsNot Raise raises:TypeError(\"Cannot pass both 'top' and 'ymax'\") Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "dispatch", - "source_code": "@staticmethod def dispatch(sm: 'SamplingMethod') -> SamplingType: if sm = = SamplingMethod.RANDOM: return partial(SamplingMethod._generate_value_for_type, True) elif sm = = SamplingMethod.TOGGLE: return partial(SamplingMethod._generate_value_for_type, False) else: raise ValueError(f'malformed sampling method: {sm}')", - "docstring": "Returns a function that will generate values from a type, based on the SamplingMethod passed in.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", - "ast_data": "FunctionDef name:dispatch arguments arg:sm type:'SamplingMethod' If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Raise raises:ValueError(f'malformed sampling method: {sm}')" - }, - { - "library": "pygame", - "name": "alive", - "source_code": "def alive(self): return bool(self.__g)", - "docstring": "does the sprite belong to any groups Sprite.alive(): return bool Returns True when the Sprite belongs to one or more Groups.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:alive arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "combine_hash_arrays", - "source_code": "def combine_hash_arrays(arrays: Iterator[np.ndarray], num_items: int) -> npt.NDArray[np.uint64]: try: first = next(arrays) except StopIteration: return np.array([], dtype = np.uint64) arrays = itertools.chain([first], arrays) mult = np.uint64(1000003) out = np.zeros_like(first) + np.uint64(3430008) last_i = 0 for i, a in enumerate(arrays): inverse_i = num_items - i out ^ = a out * = mult mult + = np.uint64(82520 + inverse_i + inverse_i) last_i = i assert last_i + 1 = = num_items, 'Fed in wrong num_items' out + = np.uint64(97531) return out", - "docstring": "Parameters ---------- arrays : Iterator[np.ndarray] num_items : int Returns ------- np.ndarray[uint64] Should be the same as CPython's tupleobject.c", - "type": "function", - "file_path": "pandas\\pandas\\core\\util\\hashing.py", - "ast_data": "FunctionDef name:combine_hash_arrays arguments arg:arrays type:Iterator[np.ndarray] arg:num_items type:int Try Assign Call call:next ExceptHandler Return return:yes Assign Call call:chain Assign Call call:uint64 Assign Assign For Call call:enumerate Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "finalize_fn", - "source_code": "def finalize_fn(iterator_id_t): def finalize_py_func(iterator_id): generator_state.iterator_completed(iterator_id) return np.array(0, dtype = np.int64) return script_ops.numpy_function(finalize_py_func, [iterator_id_t], dtypes.int64)", - "docstring": "Releases host-side state for the iterator with ID .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\from_generator_op.py", - "ast_data": "FunctionDef name:finalize_fn arguments arg:iterator_id_t FunctionDef name:finalize_py_func arguments arg:iterator_id Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "isinstance", - 
"source_code": "def isinstance(obj, target_type): return _isinstance(obj, target_type)", - "docstring": "Provide container type refinement in TorchScript. It can refine parameterized containers of the List, Dict, Tuple, and Optional types. E.g. `` for type refinement): .. testcode:: import torch from typing import Any, Dict, List class MyModule(torch.nn.Module): def __init__(self) -> None: super().__init__() def forward(self, input: Any): # note the Any type if torch.jit.isinstance(input, List[torch.Tensor]): for t in input: y = t.clamp(0, 0.5) elif torch.jit.isinstance(input, Dict[str, str]): for val in input.values(): print(val) m = torch.jit.script(MyModule()) x = [torch.rand(3,3), torch.rand(4,3)] m(x) y = {\"key1\":\"val1\",\"key2\":\"val2\"} m(y)", - "type": "function", - "file_path": "pytorch\\torch\\jit\\__init__.py", - "ast_data": "FunctionDef name:isinstance arguments arg:obj arg:target_type Return return:yes" - }, - { - "library": "scipy", - "name": "logsf", - "source_code": "def logsf(self, x, *args, **kwds): args, loc, scale = self._parse_args(*args, **kwds) x, loc, scale = map(asarray, (x, loc, scale)) args = tuple(map(asarray, args)) _a, _b = self._get_support(*args) dtyp = np.promote_types(x.dtype, np.float64) x = np.asarray((x - loc) / scale, dtype = dtyp) cond0 = self._argcheck(*args) & (scale > 0) cond1 = self._open_support_mask(x, *args) & (scale > 0) cond2 = cond0 & (x < = _a) cond = cond0 & cond1 output = empty(shape(cond), dtyp) output.fill(-inf) place(output, 1 - cond0 + np.isnan(x), self.badvalue) place(output, cond2, 0.0) if np.any(cond): goodargs = argsreduce(cond, *(x,) + args) place(output, cond, self._logsf(*goodargs)) if output.ndim = = 0: return output[()] return output", - "docstring": "Log of the survival function of the given RV. Returns the log of the \"survival function,\" defined as (1 - ), evaluated at . Parameters ---------- x : array_like quantiles arg1, arg2, arg3,... : array_like The shape parameter(s) for the distribution (see docstring of the instance object for more information) loc : array_like, optional location parameter (default=0) scale : array_like, optional scale parameter (default=1) Returns ------- logsf : ndarray Log of the survival function evaluated at .", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_distn_infrastructure.py", - "ast_data": "FunctionDef name:logsf arguments arg:self arg:x vararg:args kwarg:kwds Assign Call call:_parse_args Assign Call call:map Assign Call call:tuple Assign Call call:_get_support Assign Call call:promote_types Assign Call call:asarray Assign Assign Assign Assign Assign Call call:empty If Call call:any Assign Call call:argsreduce If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "master", - "source_code": "def master(self, task_type = None, task_id = None, rpc_layer = None): if task_type is not None and task_id is not None: master = self.cluster_spec().task_address(task_type, task_id) return format_master_url(master, rpc_layer or self._rpc_layer) return self._cluster_resolvers[0].master(rpc_layer = rpc_layer)", - "docstring": "Returns the master address to use when creating a session. This usually returns the master from the first ClusterResolver passed in, but you can override this by specifying the task_type and task_id. Note: this is only useful for TensorFlow 1.x. Args: task_type: (Optional) The type of the TensorFlow task of the master. task_id: (Optional) The index of the TensorFlow task of the master. 
rpc_layer: (Optional) The RPC protocol for the given cluster. Returns: The name or URL of the session master.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", - "ast_data": "FunctionDef name:master arguments arg:self arg:task_type arg:task_id arg:rpc_layer If BoolOp Compare op:IsNot Compare op:IsNot Assign Call call:task_address Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_wx_font", - "source_code": "def get_wx_font(self, s, prop): _log.debug('%s - get_wx_font()', type(self)) key = hash(prop) font = self.fontd.get(key) if font is not None: return font size = self.points_to_pixels(prop.get_size_in_points()) self.fontd[key] = font = wx.Font(pointSize = round(size), family = self.fontnames.get(prop.get_name(), wx.ROMAN), style = self.fontangles[prop.get_style()], weight = self.fontweights[prop.get_weight()]) return font", - "docstring": "Return a wx font. Cache font instances for efficiency.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_wx.py", - "ast_data": "FunctionDef name:get_wx_font arguments arg:self arg:s arg:prop Assign Call call:hash Assign Call call:get If Compare op:IsNot Return return:yes Assign Call call:points_to_pixels Assign Call call:Font Return return:yes" - }, - { - "library": "django", - "name": "SameAsLookup", - "source_code": "@BaseSpatialField.register_lookup class SameAsLookup(GISLookup): lookup_name = 'same_as'", - "docstring": "The \"~=\" operator is the \"same as\" operator. It tests actual geometric equality of two features. So if A and B are the same feature, vertex-by-vertex, the operator returns true.", - "type": "class", - "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", - "ast_data": "ClassDef name:SameAsLookup Assign" - }, - { - "library": "django", - "name": "url", - "source_code": "def url(self, name): raise NotImplementedError('subclasses of Storage must provide a url() method')", - "docstring": "Return an absolute URL where the file's contents can be accessed directly by a web browser.", - "type": "method", - "file_path": "django\\django\\core\\files\\storage\\base.py", - "ast_data": "FunctionDef name:url arguments arg:self arg:name Raise raises:NotImplementedError('subclasses of Storage must provide a url() method')" - }, - { - "library": "scipy", - "name": "set_global_backend", - "source_code": "def set_global_backend(backend, coerce = False, only = False, try_last = False): backend = _backend_from_arg(backend) ua.set_global_backend(backend, coerce = coerce, only = only, try_last = try_last)", - "docstring": "Sets the global fft backend This utility method replaces the default backend for permanent use. It will be tried in the list of backends automatically, unless the `set_backend`. Notes ----- This will overwrite the previously set global backend, which, by default, is the SciPy implementation. Examples -------- We can set the global fft backend: >>> from scipy.fft import fft, set_global_backend >>> set_global_backend(\"scipy\") # Sets global backend (default is \"scipy\"). 
>>> fft([1]) # Calls the global backend array([1.+0.j])", - "type": "function", - "file_path": "scipy\\scipy\\fft\\_backend.py", - "ast_data": "FunctionDef name:set_global_backend arguments arg:backend arg:coerce arg:only arg:try_last Assign Call call:_backend_from_arg" - }, - { - "library": "scipy", - "name": "Step2", - "source_code": "class Step2(Benchmark): change_dimensionality = True def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-100.0] * self.N, [100.0] * self.N)) self.custom_bounds = ([-5, 5], [-5, 5]) self.global_optimum = [[0.5 for _ in range(self.N)]] self.fglob = 0.5 def fun(self, x, *args): self.nfev + = 1 return sum((floor(x) + 0.5) ** 2.0)", - "docstring": "Step objective function. This class defines the Step 2 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Step}}(x) = \\sum_{i=1}^{n} \\left ( \\lfloor x_i + 0.5 \\rfloor \\right )^2 Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: for :math: .. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_S.py", - "ast_data": "ClassDef name:Step2 Assign FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "django", - "name": "select_format", - "source_code": "def select_format(self, compiler, sql, params): return (sql, params)", - "docstring": "Custom format for select clauses. For example, GIS columns need to be selected as AsText(table.col) on MySQL as the table.col data can't be used by Django.", - "type": "method", - "file_path": "django\\django\\db\\models\\fields\\__init__.py", - "ast_data": "FunctionDef name:select_format arguments arg:self arg:compiler arg:sql arg:params Return return:yes" - }, - { - "library": "tensorflow", - "name": "add_function_def", - "source_code": "def add_function_def(self, fdef): self.ensure_initialized() if is_oss: fdef_string = fdef.SerializeToString() pywrap_tfe.TFE_ContextAddFunctionDef(self._handle, fdef_string, len(fdef_string)) else: pywrap_tfe.TFE_ContextAddFunctionDefNoSerialization(self._handle, fdef)", - "docstring": "Add a function definition to the context. Once added, the function (identified by its name) can be executed like any other operation. Args: fdef: A FunctionDef protocol buffer message.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:add_function_def arguments arg:self arg:fdef If Assign Call call:SerializeToString" - }, - { - "library": "pytorch", - "name": "build_plan", - "source_code": "@abc.abstractmethod def build_plan(self, module: nn.Module) -> ShardingPlan: pass", - "docstring": "Given a nn.Module, define how to shard the module across ranks, return a ShardingPlan Args: module (:class:): The module to apply sharding to. 
Returns: A :class: object that represents how to shard the module.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_plan\\api.py", - "ast_data": "FunctionDef name:build_plan arguments arg:self arg:module type:nn.Module" - }, - { - "library": "scipy", - "name": "random", - "source_code": "def random(self, n: IntNumber = 1) -> np.ndarray: sample = np.empty((n, len(self.pvals))) for i in range(n): base_draws = self.engine.random(self.n_trials).ravel() p_cumulative = np.empty_like(self.pvals, dtype = float) _fill_p_cumulative(np.array(self.pvals, dtype = float), p_cumulative) sample_ = np.zeros_like(self.pvals, dtype = np.intp) _categorize(base_draws, p_cumulative, sample_) sample[i] = sample_ return sample", - "docstring": "Draw QMC samples from the multinomial distribution. Parameters ---------- n : int, optional Number of samples to generate in the parameter space. Default is 1. Returns ------- samples : array_like (n, pvals) Sample.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_qmc.py", - "ast_data": "FunctionDef name:random arguments arg:self arg:n type:IntNumber Assign Call call:empty For Call call:range Assign Call call:ravel Assign Call call:empty_like Assign Call call:zeros_like Assign Return return:yes" - }, - { - "library": "algorithms", - "name": "optimal_set_cover", - "source_code": "def optimal_set_cover(universe, subsets, costs): pset = powerset(subsets.keys()) best_set = None best_cost = float('inf') for subset in pset: covered = set() cost = 0 for s in subset: covered.update(subsets[s]) cost += costs[s] if len(covered) == len(universe) and cost < best_cost: best_set = subset best_cost = cost return best_set", - "docstring": "Optimal algorithm - DONT USE ON BIG INPUTS - O(2^n) complexity!
Finds the minimum cost subcollection os S that covers all elements of U Args: universe (list): Universe of elements subsets (dict): Subsets of U {S1:elements,S2:elements} costs (dict): Costs of each subset in S - {S1:cost, S2:cost...}", - "type": "function", - "file_path": "algorithms\\algorithms\\set\\set_covering.py", - "ast_data": "FunctionDef name:optimal_set_cover arguments arg:universe arg:subsets arg:costs Assign Call call:powerset Assign Assign Call call:float For Assign Call call:set Assign For If BoolOp Compare op:Eq Compare op:Lt Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "maximum", - "source_code": "@property def maximum(self): return self._maximum", - "docstring": "Returns a NumPy array specifying the maximum bounds (inclusive).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", - "ast_data": "FunctionDef name:maximum arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_clip_rectangle", - "source_code": "def get_clip_rectangle(self): return self._cliprect", - "docstring": "Return the clip rectangle as a instance.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:get_clip_rectangle arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_initialized", - "source_code": "def is_initialized(self, name = None): if values_util.is_saving_non_distributed(): return self._primary.is_initialized() if self._use_packed_variable(): return self._packed_var.is_initialized() result = self._primary.is_initialized() for v in self._values[1: -1]: result = math_ops.logical_and(result, v.is_initialized()) result = math_ops.logical_and(result, self._values[-1].is_initialized(), name = name) return result", - "docstring": "Identifies if all the component variables are initialized. Args: name: Name of the final op. Returns: The op that evaluates to True or False depending on if all the component variables are initialized.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", - "ast_data": "FunctionDef name:is_initialized arguments arg:self arg:name If Call call:is_saving_non_distributed Return return:yes If Call call:_use_packed_variable Return return:yes Assign Call call:is_initialized For Assign Call call:logical_and Assign Call call:logical_and Return return:yes" - }, - { - "library": "authlib", - "name": "validate_request_object_signing_alg", - "source_code": "def validate_request_object_signing_alg(self): self._validate_claim_value('request_object_signing_alg')", - "docstring": "JWS [JWS] alg algorithm [JWA] that MUST be used for signing Request Objects sent to the OP. All Request Objects from this Client MUST be rejected, if not signed with this algorithm. Request Objects are described in Section 6.1 of OpenID Connect Core 1.0 [OpenID.Core]. This algorithm MUST be used both when the Request Object is passed by value (using the request parameter) and when it is passed by reference (using the request_uri parameter). Servers SHOULD support RS256. The value none MAY be used. 
The default, if omitted, is that any algorithm supported by the OP and the RP MAY be used.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", - "ast_data": "FunctionDef name:validate_request_object_signing_alg arguments arg:self" - }, - { - "library": "cherrypy", - "name": "json_in", - "source_code": "def json_in(content_type = [ntou('application/json'), ntou('text/javascript')], force = True, debug = False, processor = json_processor): request = cherrypy.serving.request if isinstance(content_type, text_or_bytes): content_type = [content_type] if force: if debug: cherrypy.log('Removing body processors %s' % repr(request.body.processors.keys()), 'TOOLS.JSON_IN') request.body.processors.clear() request.body.default_proc = cherrypy.HTTPError(415, 'Expected an entity of content type %s' % ', '.join(content_type)) for ct in content_type: if debug: cherrypy.log('Adding body processor for %s' % ct, 'TOOLS.JSON_IN') request.body.processors[ct] = processor", - "docstring": "Add a processor to parse JSON request entities. The default processor places the parsed data into request.json. Incoming request entities which match the given content_type(s) will be deserialized from JSON to the Python equivalent, and the result stored at cherrypy.request.json. The 'content_type' argument may be a Content-Type string or a list of allowable Content-Type strings. If the 'force' argument is True (the default), then entities of other content types will not be allowed; \"415 Unsupported Media Type\" is raised instead. Supply your own processor to use a custom decoder, or to handle the parsed data differently. The processor can be configured via tools.json_in.processor or via the decorator method. Note that the deserializer requires the client send a Content-Length request header, or it will raise \"411 Length Required\". 
If for any other reason the request entity cannot be deserialized from JSON, it will raise \"400 Bad Request: Invalid JSON document\".", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\jsontools.py", - "ast_data": "FunctionDef name:json_in arguments arg:content_type arg:force arg:debug arg:processor Assign If Call call:isinstance Assign If If Assign Call call:HTTPError For If Assign" - }, - { - "library": "django", - "name": "clean_username", - "source_code": "def clean_username(self, username, request): backend_str = request.session[auth.BACKEND_SESSION_KEY] backend = auth.load_backend(backend_str) try: username = backend.clean_username(username) except AttributeError: pass return username", - "docstring": "Allow the backend to clean the username, if the backend defines a clean_username method.", - "type": "method", - "file_path": "django\\django\\contrib\\auth\\middleware.py", - "ast_data": "FunctionDef name:clean_username arguments arg:self arg:username arg:request Assign Assign Call call:load_backend Try Assign Call call:clean_username ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "save_on_cpu", - "source_code": "class save_on_cpu(saved_tensors_hooks): def __init__(self, pin_memory: bool = False, device_type: str = 'cuda') -> None: device_module = getattr(torch, device_type, torch.cuda) def pack_to_cpu(tensor: torch.Tensor) -> tuple[torch.device, torch.Tensor]: if not pin_memory: return (tensor.device, tensor.cpu()) packed = torch.empty(tensor.size(), dtype = tensor.dtype, layout = tensor.layout, pin_memory = device_module.is_available() and (not tensor.is_sparse)) packed.copy_(tensor) return (tensor.device, packed) def unpack_from_cpu(packed: tuple[torch.device, torch.Tensor]) -> torch.Tensor: device, tensor = packed return tensor.to(device, non_blocking = pin_memory) super().__init__(pack_to_cpu, unpack_from_cpu)", - "docstring": "Context manager under which tensors saved by the forward pass will be stored on cpu, then retrieved for backward. When performing operations within this context manager, intermediary results saved in the graph during the forward pass will be moved to CPU, then copied back to the original device when needed for the backward pass. If the graph was already on CPU, no tensor copy is performed. Use this context-manager to trade compute for GPU memory usage (e.g. when your model doesn't fit in GPU memory during training). Args: pin_memory (bool): If `cuda-memory-pinning`. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_CUDA) >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) >>> a = torch.randn(5, requires_grad=True, device=\"cuda\") >>> b = torch.randn(5, requires_grad=True, device=\"cuda\") >>> c = torch.randn(5, requires_grad=True, device=\"cuda\") >>> >>> def f(a, b, c): ... prod_1 = a * b # a and b are saved on GPU ... with torch.autograd.graph.save_on_cpu(): ... prod_2 = prod_1 * c # prod_1 and c are saved on CPU ... y = prod_2 * a # prod_2 and a are saved on GPU ... 
return y >>> >>> y = f(a, b, c) >>> del a, b, c # for illustration only >>> # the content of a, b, and prod_2 are still alive on GPU >>> # the content of prod_1 and c only live on CPU >>> y.sum().backward() # all CPU tensors are moved back to GPU, for backward >>> # all intermediary tensors are released (deleted) after the call to backward", - "type": "class", - "file_path": "pytorch\\torch\\autograd\\graph.py", - "ast_data": "ClassDef name:save_on_cpu FunctionDef name:__init__ arguments arg:self arg:pin_memory type:bool arg:device_type type:str Assign Call call:getattr FunctionDef name:pack_to_cpu arguments arg:tensor type:torch.Tensor If Return return:yes Assign Call call:empty Return return:yes FunctionDef name:unpack_from_cpu arguments arg:packed type:tuple[torch.device, torch.Tensor] Assign Return return:yes" - }, - { - "library": "scipy", - "name": "todia", - "source_code": "def todia(self, copy = False): return self.tocoo(copy = copy).todia(copy = False)", - "docstring": "Convert this array/matrix to sparse DIAgonal format. With copy=False, the data/indices may be shared between this array/matrix and the resultant dia_array/matrix.", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_base.py", - "ast_data": "FunctionDef name:todia arguments arg:self arg:copy Return return:yes" - }, - { - "library": "scikit-learn", - "name": "process_routing", - "source_code": "def process_routing(_obj, _method, /, **kwargs): if not kwargs: class EmptyRequest: def get(self, name, default = None): return Bunch(**{method: dict() for method in METHODS}) def __getitem__(self, name): return Bunch(**{method: dict() for method in METHODS}) def __getattr__(self, name): return Bunch(**{method: dict() for method in METHODS}) return EmptyRequest() if not (hasattr(_obj, 'get_metadata_routing') or isinstance(_obj, MetadataRouter)): raise AttributeError(f'The given object ({_obj.__class__.__name__!r}) needs to either implement the routing method `get_metadata_routing` or be a `MetadataRouter` instance.') if _method not in METHODS: raise TypeError(f'Can only route and process input on these methods: {METHODS}, while the passed method is: {_method}.') request_routing = get_routing_for_object(_obj) request_routing.validate_metadata(params = kwargs, method = _method) routed_params = request_routing.route_params(params = kwargs, caller = _method) return routed_params", - "docstring": "Validate and route input parameters. This function is used inside a router's method, e.g. :term:, to validate the metadata and handle the routing. 
Assuming this signature of a router's fit method: `~utils.Bunch~sklearn.utils.Bunchobj.get_metadata_routing()`.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", - "ast_data": "FunctionDef name:process_routing arguments kwarg:kwargs If ClassDef name:EmptyRequest FunctionDef name:get arguments arg:self arg:name arg:default Return return:yes FunctionDef name:__getitem__ arguments arg:self arg:name Return return:yes FunctionDef name:__getattr__ arguments arg:self arg:name Return return:yes Return return:yes If Raise raises:AttributeError(f'The given object ({_obj.__class__.__name__!r}) needs to either implement the routing method `get_metadata_routing` or be a `MetadataRouter` instance.') If Compare op:NotIn Raise raises:TypeError(f'Can only route and process input on these methods: {METHODS}, while the passed method is: {_method}.') Assign Call call:get_routing_for_object Assign Call call:route_params Return return:yes" - }, - { - "library": "django", - "name": "RightLookup", - "source_code": "@BaseSpatialField.register_lookup class RightLookup(GISLookup): lookup_name = 'right'", - "docstring": "The 'right' operator returns true if A's bounding box is strictly to the right of B's bounding box.", - "type": "class", - "file_path": "django\\django\\contrib\\gis\\db\\models\\lookups.py", - "ast_data": "ClassDef name:RightLookup Assign" - }, - { - "library": "tensorflow", - "name": "executor_scope", - "source_code": "@tf_contextlib.contextmanager def executor_scope(e): ctx = context() executor_old = ctx.executor try: ctx.executor = e yield finally: ctx.executor = executor_old", - "docstring": "Context manager for changing executor for current thread. Args: e: A Executor to execute eager ops under this scope. Setting it to None will switch back to use the default executor for the context. Yields: Context manager for setting the executor for current thread.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:executor_scope arguments arg:e Assign Call call:context Assign Try Assign Assign" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, input_nodes: list[ir.IRNode], layout: ir.Layout, num_threads: int, register_blocking: GemmBlocking, beta: int = 1, alpha: int = 1, has_bias: bool = False, epilogue_creator: Optional[Callable[[ir.Buffer], ir.Pointwise]] = None, act_mapping: Optional[dict[int, ir.IRNode]] = None, gemm_grouped_num: int = 1) -> None: super().__init__(input_nodes, layout, num_threads, register_blocking, beta, alpha, has_bias, epilogue_creator) self.act_mapping = act_mapping self.gemm_grouped_num = gemm_grouped_num self.output_node: list[ir.Buffer] = [ir.Buffer(name = 'buf_out' + str(idx), layout = layout) for idx in range(gemm_grouped_num)]", - "docstring": "Template for Group of GEMMs: * Each GEMM has the same dimensions (m, n, k) and the same leading dimensions (lda, ldb, ldc) for their A, B, and C matrices. * Each GEMM has distinct or shared activations, has distinct weight, has unique bias or no bias, has distinct epilogues. * In the current implementation, the outputs of all GEMMs are accumulated using pointwise epilogues. 
This behavior can be extended in the future if needed.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_grouped_gemm_template.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:input_nodes type:list[ir.IRNode] arg:layout type:ir.Layout arg:num_threads type:int arg:register_blocking type:GemmBlocking arg:beta type:int arg:alpha type:int arg:has_bias type:bool arg:epilogue_creator type:Optional[Callable[[ir.Buffer], ir.Pointwise]] arg:act_mapping type:Optional[dict[int, ir.IRNode]] arg:gemm_grouped_num type:int Assign Assign" - }, - { - "library": "kornia", - "name": "set_rng_device_and_dtype", - "source_code": "def set_rng_device_and_dtype(self, device: torch.device, dtype: torch.dtype) -> None: self.device = device self.dtype = dtype if self._param_generator is not None: self._param_generator.set_rng_device_and_dtype(device, dtype)", - "docstring": "Change the random generation device and dtype. Note: The generated random numbers are not reproducible across different devices and dtypes.", - "type": "method", - "file_path": "kornia\\kornia\\augmentation\\base.py", - "ast_data": "FunctionDef name:set_rng_device_and_dtype arguments arg:self arg:device type:torch.device arg:dtype type:torch.dtype Assign Assign If Compare op:IsNot" - }, - { - "library": "pytorch", - "name": "is_exporting", - "source_code": "def is_exporting() -> bool: return _is_exporting_flag", - "docstring": "Indicated whether we're under exporting. It's stricter than is_compiling() flag, as it would only be set to True when torch.export is used. Example:: >>> def forward(self, x): >>> if not torch.compiler.is_exporting(): >>> pass # ...logic that is not needed in export... >>> >>> # ...rest of the function...", - "type": "function", - "file_path": "pytorch\\torch\\compiler\\__init__.py", - "ast_data": "FunctionDef name:is_exporting arguments Return return:yes" - }, - { - "library": "numpy", - "name": "rfftfreq", - "source_code": "@set_module('numpy.fft') def rfftfreq(n, d = 1.0, device = None): if not isinstance(n, integer_types): raise ValueError('n should be an integer') val = 1.0 / (n * d) N = n // 2 + 1 results = arange(0, N, dtype = int, device = device) return results * val", - "docstring": "Return the Discrete Fourier Transform sample frequencies (for usage with rfft, irfft). The returned float array contains the frequency bin centers in cycles per unit of the sample spacing (with zero at the start). For instance, if the sample spacing is in seconds, then the frequency unit is cycles/second. Given a window length and a sample spacing :: f = [0, 1, ..., n/2-1, n/2] / (d*n) if n is even f = [0, 1, ..., (n-1)/2-1, (n-1)/2] / (d*n) if n is odd Unlike (but like ) the Nyquist frequency component is considered to be positive. Parameters ---------- n : int Window length. d : scalar, optional Sample spacing (inverse of the sampling rate). Defaults to 1. device : str, optional The device on which to place the created array. Default: `` containing the sample frequencies. 
Examples -------- >>> import numpy as np >>> signal = np.array([-2, 8, 6, 4, 1, 0, 3, 5, -3, 4], dtype=float) >>> fourier = np.fft.rfft(signal) >>> n = signal.size >>> sample_rate = 100 >>> freq = np.fft.fftfreq(n, d=1./sample_rate) >>> freq array([ 0., 10., 20., ..., -30., -20., -10.]) >>> freq = np.fft.rfftfreq(n, d=1./sample_rate) >>> freq array([ 0., 10., 20., 30., 40., 50.])", - "type": "function", - "file_path": "numpy\\numpy\\fft\\_helper.py", - "ast_data": "FunctionDef name:rfftfreq arguments arg:n arg:d arg:device Call call:set_module If Raise raises:ValueError('n should be an integer') Assign Assign Assign Call call:arange Return return:yes" - }, - { - "library": "pytorch", - "name": "FlattenInputWithTreeSpecValidationInputStep", - "source_code": "class FlattenInputWithTreeSpecValidationInputStep(InputAdaptStep): _spec: pytree.TreeSpec | None = None def apply(self, model_args: Sequence[Any], model_kwargs: Mapping[str, Any], model: torch.nn.Module | Callable | torch_export.ExportedProgram | None = None) -> tuple[Sequence[Any], Mapping[str, Any]]: flattened_args, spec = pytree.tree_flatten((model_args, model_kwargs)) if self._spec is None: self._spec = spec else: _assert_identical_pytree_spec(self._spec, spec, error_message = 'Model inputs incompatible with the format that was exported. ') return (flattened_args, {})", - "docstring": "Flatten nested collection types and return a flat list of elements. ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor, etc). This class stores the output produced when was called the first time. It then validates the output produced from later calls.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", - "ast_data": "ClassDef name:FlattenInputWithTreeSpecValidationInputStep FunctionDef name:apply arguments arg:self arg:model_args type:Sequence[Any] arg:model_kwargs type:Mapping[str, Any] arg:model type:torch.nn.Module | Callable | torch_export.ExportedProgram | None Assign Call call:tree_flatten If Compare op:Is Assign Return return:yes" - }, - { - "library": "kornia", - "name": "left_jacobian", - "source_code": "@staticmethod def left_jacobian(vec: Tensor) -> Tensor: R_skew = vector_to_skew_symmetric_matrix(vec) theta = vec.norm(dim = -1, keepdim = True)[..., None] I = eye(3, device = vec.device, dtype = vec.dtype) Jl = I + (1 - theta.cos()) / theta ** 2 * R_skew + (theta - theta.sin()) / theta ** 3 * (R_skew @ R_skew) return Jl", - "docstring": "Compute the left Jacobian of So3. Args: vec: the input point of shape :math:. 
Example: >>> vec = torch.tensor([1., 2., 3.]) >>> So3.left_jacobian(vec) tensor([[-0.0687, -0.2267, 0.5074], [ 0.5556, 0.1779, 0.3629], [-0.0141, 0.6236, 0.5890]])", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", - "ast_data": "FunctionDef name:left_jacobian arguments arg:vec type:Tensor Assign Call call:vector_to_skew_symmetric_matrix Assign Assign Call call:eye Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "ShardingPolicy", - "source_code": "@tf_export('data.experimental.service.ShardingPolicy') class ShardingPolicy(enum.IntEnum): OFF = 0 DYNAMIC = 1 FILE = 2 DATA = 3 FILE_OR_DATA = 4 HINT = 5 def _to_proto(self) -> data_service_pb2.ProcessingModeDef.ShardingPolicy: if self = = ShardingPolicy.OFF: return data_service_pb2.ProcessingModeDef.OFF if self = = ShardingPolicy.DYNAMIC: return data_service_pb2.ProcessingModeDef.DYNAMIC if self = = ShardingPolicy.FILE: return data_service_pb2.ProcessingModeDef.FILE if self = = ShardingPolicy.DATA: return data_service_pb2.ProcessingModeDef.DATA if self = = ShardingPolicy.FILE_OR_DATA: return data_service_pb2.ProcessingModeDef.FILE_OR_DATA if self = = ShardingPolicy.HINT: return data_service_pb2.ProcessingModeDef.HINT raise ValueError(f'Unable to convert sharding policy {self!r} to proto.')", - "docstring": "Specifies how to shard data among tf.data service workers. OFF: No sharding will be performed. Each worker produces the entire dataset without any sharding. With this mode, the best practice is to shuffle the dataset nondeterministically so that workers process the dataset in different orders. If workers are restarted or join the cluster mid-job, they will begin processing the dataset from the beginning. DYNAMIC: The input dataset is dynamically split among workers at runtime. Each worker gets the next split when it reads data from the dispatcher. Data is produced non-deterministically in this mode. Dynamic sharding works well with varying-sized tf.data service clusters, e.g., when you need to auto-scale your workers. Dynamic sharding provides at-most once visitation guarantees. No examples will be repeated, but some may be missed if a tf.data service worker gets restarted while processing a file. The following are static sharding policies. The semantics are similar to . These policies require: * The tf.data service cluster is configured with a fixed list of workers in DispatcherConfig. * Each client only reads from the local tf.data service worker. If a worker is restarted while performing static sharding, the worker will begin processing its shard again from the beginning. FILE: Shards by input files (i.e. each worker will get a fixed set of files to process). When this option is selected, make sure that there is at least as many files as workers. If there are fewer input files than workers, a runtime error will be raised. DATA: Shards by elements produced by the dataset. Each worker will process the whole dataset and discard the portion that is not for itself. Note that for this mode to correctly partition the dataset elements, the dataset needs to produce elements in a deterministic order. FILE_OR_DATA: Attempts FILE-based sharding, falling back to DATA-based sharding on failure. 
HINT: Looks for the presence of which is treated as a placeholder to replace with .", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\data_service_ops.py", - "ast_data": "ClassDef name:ShardingPolicy Call call:tf_export Assign Assign Assign Assign Assign Assign FunctionDef name:_to_proto arguments arg:self If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Raise raises:ValueError(f'Unable to convert sharding policy {self!r} to proto.')" - }, - { - "library": "django", - "name": "from_bbox", - "source_code": "@classmethod def from_bbox(cls, bbox): x0, y0, x1, y1 = bbox for z in bbox: if not isinstance(z, (float, int)): return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' % (x0, y0, x0, y1, x1, y1, x1, y0, x0, y0)) return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))", - "docstring": "Construct a Polygon from a bounding box (4-tuple).", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\polygon.py", - "ast_data": "FunctionDef name:from_bbox arguments arg:cls arg:bbox Assign For If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "fill_object_graph_proto", - "source_code": "def fill_object_graph_proto(self, proto: saved_object_graph_pb2.SavedObjectGraph): for node_id, node in enumerate(self.nodes): assert self.node_ids[node] = = node_id object_proto = proto.nodes.add() object_proto.slot_variables.extend(self._slot_variables.get(node, ())) if isinstance(node, _CapturedTensor): continue for child in self.augmented_graph_view.list_children(node): child_proto = object_proto.children.add() child_proto.node_id = self.node_ids[child.ref] child_proto.local_name = child.name for name, ref in self.augmented_graph_view.list_dependencies(node): child_proto = object_proto.dependencies.add() child_proto.node_id = self.node_ids[ref] child_proto.local_name = name if node in self._saveable_objects_map: assert node not in self._obj_to_registered_saver, \"Objects can't have both SaveableObjects and a registered saver\" for local_name, (save_fn, restore_fn) in self._saveable_objects_map[node].items(): saveable_object_proto = object_proto.saveable_objects[local_name] saveable_object_proto.save_function = self.node_ids[save_fn] saveable_object_proto.restore_function = self.node_ids[restore_fn] elif node in self._obj_to_registered_saver: object_proto.registered_saver = self._obj_to_registered_saver[node]", - "docstring": "Populate the nodes, children and slot_variables of a SavedObjectGraph.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\save.py", - "ast_data": "FunctionDef name:fill_object_graph_proto arguments arg:self arg:proto type:saved_object_graph_pb2.SavedObjectGraph For Call call:enumerate Assign Call call:add If Call call:isinstance For Call call:list_children Assign Call call:add Assign Assign For Call call:list_dependencies Assign Call call:add Assign Assign If Compare op:In For Call call:items Assign Assign Assign If Compare op:In Assign" - }, - { - "library": "sphinx", - "name": "run", - "source_code": "def run(self) -> list[Node]: if ': ' in self.name: self.domain, self.objtype = self.name.split(': ', 1) else: self.domain, self.objtype = ('', self.name) node = addnodes.desc() node.document = self.state.document node['domain'] = self.domain node['objtype'] = node['desctype'] = 
self.objtype node['no-index'] = True self.names: list[str] = [] alias_options = {'maxdepth': self.options.get('maxdepth', 1), 'noroot': 'noroot' in self.options} if alias_options['noroot'] and alias_options['maxdepth'] = = 1: logger.warning(\"Error in C alias declaration. Requested 'noroot' but 'maxdepth' 1. When skipping the root declaration, need 'maxdepth' 0 for infinite or at least 2.\", location = self.get_location()) for sig in self.get_signatures(): node.append(AliasNode(sig, alias_options, self.state.document, env = self.env)) return [node]", - "docstring": "On purpose this doesn't call the ObjectDescription version, but is based on it. Each alias signature may expand into multiple real signatures if 'noroot'. The code is therefore based on the ObjectDescription version.", - "type": "method", - "file_path": "sphinx\\sphinx\\domains\\c\\__init__.py", - "ast_data": "FunctionDef name:run arguments arg:self If Compare op:In Assign Call call:split Assign Assign Call call:desc Assign Assign Assign Assign Assign If BoolOp Compare op:Eq For Call call:get_signatures Return return:yes" - }, - { - "library": "pytorch", - "name": "all_gather", - "source_code": "def all_gather(self, step: str, map_fun: Callable[[], T]) -> list[T]: result: Union[T, WRAPPED_EXCEPTION] try: result = map_fun() except BaseException as e: result = _wrap_exception(e) all_results = self.all_gather_object(result) node_failures = _get_failure_dict(all_results) if len(node_failures) > 0: raise CheckpointException(step, node_failures) return cast(list[T], all_results)", - "docstring": "Compute a value on each rank, then all_gather them. This method operates in the following way: Run `` on all ranks all_gather the values to all ranks", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\utils.py", - "ast_data": "FunctionDef name:all_gather arguments arg:self arg:step type:str arg:map_fun type:Callable[[], T] Try Assign Call call:map_fun ExceptHandler Assign Call call:_wrap_exception Assign Call call:all_gather_object Assign Call call:_get_failure_dict If Compare op:Gt Raise raises:CheckpointException(step, node_failures) Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_figheight", - "source_code": "def set_figheight(self, val, forward = True): self.set_size_inches(self.get_figwidth(), val, forward = forward)", - "docstring": "Set the height of the figure in inches. Parameters ---------- val : float forward : bool See . 
See Also -------- matplotlib.figure.Figure.set_figwidth matplotlib.figure.Figure.set_size_inches", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:set_figheight arguments arg:self arg:val arg:forward" - }, - { - "library": "django", - "name": "get_reverse_path_info", - "source_code": "def get_reverse_path_info(self, filtered_relation = None): opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo(from_opts = from_opts, to_opts = opts, target_fields = (opts.pk,), join_field = self.remote_field, m2m = not self.unique, direct = False, filtered_relation = filtered_relation)]", - "docstring": "Get path from the related model to this field's model.", - "type": "method", - "file_path": "django\\django\\db\\models\\fields\\related.py", - "ast_data": "FunctionDef name:get_reverse_path_info arguments arg:self arg:filtered_relation Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "conv2d_input", - "source_code": "def conv2d_input(input_size, weight, grad_output, stride = 1, padding = 0, dilation = 1, groups = 1): input = grad_output.new_empty(1).expand(input_size) return torch.ops.aten.convolution_backward(grad_output, input, weight, None, _pair(stride), _pair(padding), _pair(dilation), False, [0], groups, (True, False, False))[0]", - "docstring": "Compute the gradient of conv2d with respect to the input of the convolution. This is same as the 2D transposed convolution operator under the hood but requires the shape of the gradient w.r.t. input to be specified explicitly. Args: input_size : Shape of the input gradient tensor weight: weight tensor (out_channels x in_channels/groups x kH x kW) grad_output : output gradient tensor (minibatch x out_channels x oH x oW) stride (int or tuple, optional): Stride of the convolution. Default: 1 padding (int or tuple, optional): Zero-padding added to both sides of the input. Default: 0 dilation (int or tuple, optional): Spacing between kernel elements. Default: 1 groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1 Examples:: >>> input = torch.randn(1, 1, 3, 3, requires_grad=True) >>> weight = torch.randn(1, 1, 1, 2, requires_grad=True) >>> output = F.conv2d(input, weight) >>> grad_output = torch.randn(output.shape) >>> grad_input = torch.autograd.grad(output, input, grad_output) >>> F.grad.conv2d_input(input.shape, weight, grad_output)", - "type": "function", - "file_path": "pytorch\\torch\\nn\\grad.py", - "ast_data": "FunctionDef name:conv2d_input arguments arg:input_size arg:weight arg:grad_output arg:stride arg:padding arg:dilation arg:groups Assign Call call:expand Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_linewidth", - "source_code": "def set_linewidth(self, linewidth): self.patch.set_linewidth(linewidth)", - "docstring": "Set the line width of the Figure rectangle. 
Parameters ---------- linewidth : number", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:set_linewidth arguments arg:self arg:linewidth" - }, - { - "library": "pandas", - "name": "validate_data_columns", - "source_code": "def validate_data_columns(self, data_columns, min_itemsize, non_index_axes) -> list: if not len(non_index_axes): return [] axis, axis_labels = non_index_axes[0] info = self.info.get(axis, {}) if info.get('type') = = 'MultiIndex' and data_columns: raise ValueError(f'cannot use a multi-index on axis [{axis}] with data_columns {data_columns}') if data_columns is True: data_columns = list(axis_labels) elif data_columns is None: data_columns = [] if isinstance(min_itemsize, dict): existing_data_columns = set(data_columns) data_columns = list(data_columns) data_columns.extend([k for k in min_itemsize.keys() if k ! = 'values' and k not in existing_data_columns]) return [c for c in data_columns if c in axis_labels]", - "docstring": "take the input data_columns and min_itemize and create a data columns spec", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:validate_data_columns arguments arg:self arg:data_columns arg:min_itemsize arg:non_index_axes If Return return:yes Assign Assign Call call:get If BoolOp Compare op:Eq Raise raises:ValueError(f'cannot use a multi-index on axis [{axis}] with data_columns {data_columns}') If Compare op:Is Assign Call call:list If Compare op:Is Assign If Call call:isinstance Assign Call call:set Assign Call call:list Return return:yes" - }, - { - "library": "pytorch", - "name": "get_constant_value", - "source_code": "def get_constant_value(x: ir.IRNode) -> Optional[ir.Constant]: if isinstance(x, ir.MutableBox): return get_constant_value(x.data) if isinstance(x, ir.BaseView): return get_constant_value(x.unwrap_view()) if isinstance(x, ir.Constant): return x if not isinstance(x, ir.Loops): return None handler = torch._inductor.ops_handler.ExtractConstantsHandler(x.get_device()) with V.set_ops_handler(handler), patch.object(ir.FlexibleLayout, 'allow_indexing', True): out = x.inner_fn(*x.inner_fn_args()) assert isinstance(out, torch._inductor.virtualized.OpsValue) if isinstance(out.value, ir.Constant): return out.value return None", - "docstring": "Try convert an arbitrary IR node into an ir.Constant value", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\lowering.py", - "ast_data": "FunctionDef name:get_constant_value arguments arg:x type:ir.IRNode If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Return return:yes Assign Call call:ExtractConstantsHandler With Assign Call call:inner_fn If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "boundary", - "source_code": "@property def boundary(self): return self._topology(capi.geos_boundary(self.ptr))", - "docstring": "Return the boundary as a newly allocated Geometry object.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:boundary arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "get_fusion_pattern_to_extra_inputs_getter", - "source_code": "def get_fusion_pattern_to_extra_inputs_getter(backend_config: BackendConfig) -> dict[Pattern, Callable]: extra_inputs_getter_mapping: dict[Pattern, Callable] = {} for pattern, config in 
backend_config._pattern_complex_format_to_config.items(): if config._extra_inputs_getter is not None: extra_inputs_getter_mapping[pattern] = config._extra_inputs_getter return extra_inputs_getter_mapping", - "docstring": "Get a map from fusion pattern to a function that returns extra input nodes from the fusion pattern, in the order required by the root node. This is optional, if not specified, we will not copy over any extra inputs for the root node. Example: # Let's say we have the pattern (torch.add, MatchAllNode, (torch.nn.BatchNorm2d, torch.nn.Conv2d)) # and root node is torch.nn.Conv2d, and the node in MatchAllNode would be an extra # argument to the fused module, we can unpack the pattern and return the node at # MatchAllNode here # we can implement extra_inputs_getter as follows: def extra_inputs_getter(pattern) -> List[Any]: add, extra_input, conv_pattern = pattern return [extra_input]", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\utils.py", - "ast_data": "FunctionDef name:get_fusion_pattern_to_extra_inputs_getter arguments arg:backend_config type:BackendConfig For Call call:items If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "django", - "name": "get_migration_by_prefix", - "source_code": "def get_migration_by_prefix(self, app_label, name_prefix): results = [] for migration_app_label, migration_name in self.disk_migrations: if migration_app_label == app_label and migration_name.startswith(name_prefix): results.append((migration_app_label, migration_name)) if len(results) > 1: raise AmbiguityError(\"There is more than one migration for '%s' with the prefix '%s'\" % (app_label, name_prefix)) elif not results: raise KeyError(f\"There is no migration for '{app_label}' with the prefix '{name_prefix}'\") else: return self.disk_migrations[results[0]]", - "docstring": "Return the migration(s) which match the given app label and name_prefix.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\loader.py", - "ast_data": "FunctionDef name:get_migration_by_prefix arguments arg:self arg:app_label arg:name_prefix Assign For If BoolOp Compare op:Eq Call call:startswith If Compare op:Gt Raise raises:AmbiguityError(\"There is more than one migration for '%s' with the prefix '%s'\" % (app_label, name_prefix)) If Raise raises:KeyError(f\"There is no migration for '{app_label}' with the prefix '{name_prefix}'\") Return return:yes" - }, - { - "library": "tensorflow", - "name": "num_accelerators", - "source_code": "def num_accelerators(self, task_type = None, task_id = None, config_proto = None): if self._tpu == 'local': return {'TPU': len([d for d in framework_config.list_logical_devices() if d.device_type == 'TPU'])} retry_count = 1 while True: try: device_details = TPUClusterResolver._get_device_dict_and_cores(cluster_resolver_lib.get_accelerator_devices(self.master(), config_proto = config_proto)) break except errors.DeadlineExceededError: error_message = 'Failed to connect to master. The TPU might not be ready (e.g. still scheduling) or the master address is incorrect: got (%s)' % self.master() if retry_count <= _TPU_CONN_RETRIES: logging.warning(error_message) logging.warning('Retrying (%d/%d)...', retry_count, _TPU_CONN_RETRIES) retry_count += 1 else: raise RuntimeError(error_message) if device_details.total_cores: return {'TPU': TPUClusterResolver._verify_and_return_same_core_count(device_details.device_map)} return {'TPU': 0}", - "docstring": "Returns the number of TPU cores per worker.
Connects to the master and lists all the devices present in the master, and counts them up. Also verifies that the device counts per host in the cluster are the same before returning the number of TPU cores per host. Args: task_type: Unused. task_id: Unused. config_proto: Used to create a connection to a TPU master in order to retrieve the system metadata. Raises: RuntimeError: If we cannot talk to a TPU worker after retrying or if the number of TPU devices per host is different.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\tpu\\tpu_cluster_resolver.py", - "ast_data": "FunctionDef name:num_accelerators arguments arg:self arg:task_type arg:task_id arg:config_proto If Compare op:Eq Return return:yes Assign While Try Assign Call call:_get_device_dict_and_cores ExceptHandler Assign If Compare op:LtE Raise raises:RuntimeError(error_message) If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "display_bytes", - "source_code": "def display_bytes(b: int, unit: str = 'MiB') -> str: if unit == 'KiB': return f'{b / 2 ** 10: .2f} KiB' if unit == 'MiB': return f'{b / 2 ** 20: .2f} MiB' if unit == 'GiB': return f'{b / 2 ** 30: .2f} GiB' return f'{b: .2f} bytes'", - "docstring": "Return a string that represents the number of bytes in a desired unit", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_tools\\ilp_utils.py", - "ast_data": "FunctionDef name:display_bytes arguments arg:b type:int arg:unit type:str If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "stack_inputs", - "source_code": "def stack_inputs(self, stack_indices = None, tile_variants = False): if stack_indices is None: stack_indices = range(len(self._inputs)) length = self.pfor.loop_len_vector for i in stack_indices: inp = self._inputs[i] is_variant = inp.t.dtype == dtypes.variant if not inp.is_stacked: self._inputs[i] = _stack(inp.t, length) if tile_variants and is_variant: self._inputs[i] = wrap(_tile_variant_with_length(self._inputs[i].t, length), True) elif not tile_variants and is_variant: self._inputs[i] = wrap(_untile_variant(self._inputs[i].t), True)", - "docstring": "Stacks unstacked inputs at . Args: stack_indices: indices of inputs at which stacking is done. If None, stacking is done at all indices. tile_variants: If True, affected indices which have a variant dtype will be tiled after this operation to match the expected shape of a vectorized tensor.
Variants generally need to be un-tiled when they are inputs to operations and tiled when returned.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\parallel_for\\pfor.py", - "ast_data": "FunctionDef name:stack_inputs arguments arg:self arg:stack_indices arg:tile_variants If Compare op:Is Assign Call call:range Assign For Assign Assign Compare op:Eq If Assign Call call:_stack If BoolOp Assign Call call:wrap If BoolOp Assign Call call:wrap" - }, - { - "library": "pandas", - "name": "validate_categories", - "source_code": "@staticmethod def validate_categories(categories, fastpath: bool = False) -> Index: from pandas.core.indexes.base import Index if not fastpath and (not is_list_like(categories)): raise TypeError(f\"Parameter 'categories' must be list-like, was {categories!r}\") if not isinstance(categories, ABCIndex): categories = Index._with_infer(categories, tupleize_cols = False) if not fastpath: if categories.hasnans: raise ValueError('Categorical categories cannot be null') if not categories.is_unique: raise ValueError('Categorical categories must be unique') if isinstance(categories, ABCCategoricalIndex): categories = categories.categories return categories", - "docstring": "Validates that we have good categories Parameters ---------- categories : array-like fastpath : bool Whether to skip nan and uniqueness checks Returns ------- categories : Index", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", - "ast_data": "FunctionDef name:validate_categories arguments arg:categories arg:fastpath type:bool If BoolOp Raise raises:TypeError(f\"Parameter 'categories' must be list-like, was {categories!r}\") If Assign Call call:_with_infer If If Raise raises:ValueError('Categorical categories cannot be null') If Raise raises:ValueError('Categorical categories must be unique') If Call call:isinstance Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "set_default_mmap_options", - "source_code": "class set_default_mmap_options: def __init__(self, flags: int) -> None: if IS_WINDOWS: raise RuntimeError('Changing the default mmap options is currently not supported for Windows') if flags ! = MAP_PRIVATE and flags ! 
= MAP_SHARED: raise ValueError(f'Invalid argument in function set_default_mmap_options, expected mmap.MAP_PRIVATE or mmap.MAP_SHARED, but got {flags}') from torch.utils.serialization import config self.prev = config.load.mmap_flags config.load.mmap_flags = flags def __enter__(self) -> None: pass def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None: from torch.utils.serialization import config config.load.mmap_flags = self.prev", - "docstring": "Context manager or function to set default mmap options for :func: with ``", - "type": "class", - "file_path": "pytorch\\torch\\serialization.py", - "ast_data": "ClassDef name:set_default_mmap_options FunctionDef name:__init__ arguments arg:self arg:flags type:int If Raise raises:RuntimeError('Changing the default mmap options is currently not supported for Windows') If BoolOp Compare op:NotEq Compare op:NotEq Raise raises:ValueError(f'Invalid argument in function set_default_mmap_options, expected mmap.MAP_PRIVATE or mmap.MAP_SHARED, but got {flags}') Assign Assign FunctionDef name:__enter__ arguments arg:self FunctionDef name:__exit__ arguments arg:self arg:exc_type type:Any arg:exc_value type:Any arg:traceback type:Any Assign" - }, - { - "library": "tensorflow", - "name": "filter_with_legacy_function", - "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Dataset.filter()') def filter_with_legacy_function(self, predicate) -> 'DatasetV2': from tensorflow.python.data.ops import filter_op return filter_op._FilterDataset(self, predicate, use_legacy_function = True)", - "docstring": "Filters this dataset according to . Note: This is an escape hatch for existing uses of that do not work with V2 functions. New uses are strongly discouraged and existing uses should migrate to as this method will be removed in V2. Args: predicate: A function mapping a (nested) structure of tensors (having shapes and types defined by and ) to a scalar tensor. 
Returns: Dataset: The containing the elements of this dataset for which is .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", - "ast_data": "FunctionDef name:filter_with_legacy_function arguments arg:self arg:predicate Call call:deprecated Return return:yes" - }, - { - "library": "django", - "name": "get_date_list_period", - "source_code": "def get_date_list_period(self): return self.date_list_period", - "docstring": "Get the aggregation period for the list of dates: 'year', 'month', or 'day'.", - "type": "method", - "file_path": "django\\django\\views\\generic\\dates.py", - "ast_data": "FunctionDef name:get_date_list_period arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "number_of_shards", - "source_code": "@property def number_of_shards(self): return self._number_of_shards", - "docstring": "Returns the number of shards in the policy or None if unspecified.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_sharding.py", - "ast_data": "FunctionDef name:number_of_shards arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_canonical_name_meson", - "source_code": "def get_canonical_name_meson(target, build_path): assert len(target['filename']) = = 1 shared_library_path = Path(target['filename'][0]) shared_library_relative_path = shared_library_path.relative_to(build_path.absolute()) rel_path = shared_library_relative_path.as_posix() pattern = '\\\\.(cpython|cp\\\\d+)-.+' return re.sub(pattern, '', str(rel_path))", - "docstring": "Return a name based on generated shared library. The goal is to return a name that can be easily matched with the output from . Look at docstring to see what looks like.", - "type": "function", - "file_path": "scikit-learn\\build_tools\\check-meson-openmp-dependencies.py", - "ast_data": "FunctionDef name:get_canonical_name_meson arguments arg:target arg:build_path Assign Call call:Path Assign Call call:relative_to Assign Call call:as_posix Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "find_anchor_nodes", - "source_code": "def find_anchor_nodes(self, ctx: MatchContext, searched: OrderedSet[torch.fx.Node]) -> Generator[Optional[torch.fx.Node], None, None]: if self in ctx.pattern_to_node: yield ctx.pattern_to_node[self] return for pattern in self.flat_args_kwargs[0]: if isinstance(pattern, PatternExpr): for other_node in pattern.find_anchor_nodes(ctx, searched): if not isinstance(other_node, torch.fx.Node): continue for node in other_node.users: if node not in searched: if self._match_fns(node): yield node searched.add(node)", - "docstring": "This is used when we are matching a pattern with multiple outputs. There is a partial match (stored in ctx) and we want to walk this pattern to find a connection to an already-matched node. 
Yields candidate nodes that might like.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py", - "ast_data": "FunctionDef name:find_anchor_nodes arguments arg:self arg:ctx type:MatchContext arg:searched type:OrderedSet[torch.fx.Node] If Compare op:In Return return:no For If Call call:isinstance For Call call:find_anchor_nodes If For If Compare op:NotIn If Call call:_match_fns" - }, - { - "library": "django", - "name": "close_if_health_check_failed", - "source_code": "def close_if_health_check_failed(self): if self.connection is None or not self.health_check_enabled or self.health_check_done: return if not self.is_usable(): self.close() self.health_check_done = True", - "docstring": "Close existing connection if it fails a health check.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:close_if_health_check_failed arguments arg:self If BoolOp Compare op:Is Return return:no If Assign" - }, - { - "library": "tensorflow", - "name": "from_keras_model_file", - "source_code": "@classmethod def from_keras_model_file(cls, model_file, input_arrays = None, input_shapes = None, output_arrays = None, custom_objects = None): TFLiteConverterBase._set_original_model_type(conversion_metadata_fb.ModelType.KERAS_MODEL) return TFLiteKerasModelConverter(model_file, input_arrays, input_shapes, output_arrays, custom_objects)", - "docstring": "Creates a TFLiteConverter class from a tf.keras model file. Args: model_file: Full filepath of HDF5 file containing the tf.keras model. input_arrays: List of input tensors to freeze graph with. Uses input arrays from SignatureDef when none are provided. (default None) input_shapes: Dict of strings representing input tensor names to list of integers representing input shapes (e.g., {\"foo\" : [1, 16, 16, 3]}). Automatically determined when input shapes is None (e.g., {\"foo\" : None}). (default None) output_arrays: List of output tensors to freeze graph with. Uses output arrays from SignatureDef when none are provided. (default None) custom_objects: Dict mapping names (strings) to custom classes or functions to be considered during model deserialization. (default None) Returns: TFLiteConverter class.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", - "ast_data": "FunctionDef name:from_keras_model_file arguments arg:cls arg:model_file arg:input_arrays arg:input_shapes arg:output_arrays arg:custom_objects Return return:yes" - }, - { - "library": "tensorflow", - "name": "array_like_impl", - "source_code": "def array_like_impl(array_fn, array_like_fn, tensor, dtype, name, optimize = True, layout = None): if not tensor_util.is_tf_type(tensor): tensor = ops.convert_to_tensor(tensor, name = 'tensor') tensor_shape = tensor.shape tensor_dtype = tensor.dtype if context.executing_eagerly(): if dtype is not None and dtype ! = tensor_dtype: return array_fn(shape_internal(tensor, optimize = optimize), dtype = dtype, name = name, layout = layout) return d_api.call_with_layout(array_like_fn, layout, tensor, name = name) if optimize and tensor_shape.is_fully_defined() and (tensor_dtype ! = dtypes.variant): return array_fn(tensor_shape, dtype = dtype or tensor_dtype, name = name, layout = layout) if dtype is not None and dtype ! = tensor_dtype and (dtype ! 
= dtypes.variant): return array_fn(shape_internal(tensor, optimize = optimize), dtype = dtype, name = name, layout = layout) return d_api.call_with_layout(array_like_fn, layout, tensor, name = name)", - "docstring": "Internal implementation for ones_like and zeros_like API calls.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\array_ops.py", - "ast_data": "FunctionDef name:array_like_impl arguments arg:array_fn arg:array_like_fn arg:tensor arg:dtype arg:name arg:optimize arg:layout If Assign Call call:convert_to_tensor Assign Assign If Call call:executing_eagerly If BoolOp Compare op:IsNot Compare op:NotEq Return return:yes Return return:yes If BoolOp Call call:is_fully_defined Compare op:NotEq Return return:yes If BoolOp Compare op:IsNot Compare op:NotEq Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "translation", - "source_code": "@property def translation(self) -> Vector3 | Tensor: return self._translation", - "docstring": "Return the underlying translation vector of shape :math:.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", - "ast_data": "FunctionDef name:translation arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_data_ratio", - "source_code": "def get_data_ratio(self): return 1.0", - "docstring": "Return the aspect ratio of the data itself. For a polar plot, this should always be 1.0", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", - "ast_data": "FunctionDef name:get_data_ratio arguments arg:self Return return:yes" - }, - { - "library": "cherrypy", - "name": "wait", - "source_code": "def wait(self): while not getattr(self.httpserver, 'ready', False): if self.interrupt: raise self.interrupt time.sleep(0.1) if os.environ.get('LISTEN_PID', None): return if not isinstance(self.bind_addr, tuple): return with _safe_wait(*self.bound_addr): portend.occupied(*self.bound_addr, timeout = Timeouts.occupied)", - "docstring": "Wait until the HTTP server is ready to receive requests.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\process\\servers.py", - "ast_data": "FunctionDef name:wait arguments arg:self While If Raise raises:self.interrupt If Call call:get Return return:no If Return return:no With" - }, - { - "library": "cherrypy", - "name": "stats", - "source_code": "def stats(self, filename, sortby = 'cumulative'): sio = io.StringIO() s = pstats.Stats(os.path.join(self.path, filename), stream = sio) s.strip_dirs() s.sort_stats(sortby) s.print_stats() response = sio.getvalue() sio.close() return response", - "docstring": "Generate statistics from given profile. :returns: The sorted stats index printout. :rtype: str", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\profiler.py", - "ast_data": "FunctionDef name:stats arguments arg:self arg:filename arg:sortby Assign Call call:StringIO Assign Call call:Stats Assign Call call:getvalue Return return:yes" - }, - { - "library": "pytorch", - "name": "set_rng_state_all", - "source_code": "def set_rng_state_all(new_states: Iterable[Tensor]) -> None: for i, state in enumerate(new_states): set_rng_state(state, i)", - "docstring": "Set the random number generator state of all devices. 
Args: new_states (Iterable of torch.ByteTensor): The desired state for each device.", - "type": "function", - "file_path": "pytorch\\torch\\xpu\\random.py", - "ast_data": "FunctionDef name:set_rng_state_all arguments arg:new_states type:Iterable[Tensor] For Call call:enumerate" - }, - { - "library": "pytorch", - "name": "get_fwd_send_ops", - "source_code": "def get_fwd_send_ops(self, fwd_chunk_id: int) -> list[dist.P2POp]: output_tuple, _ = self.fwd_cache[fwd_chunk_id] ops: list[dist.P2POp] = [] for idx, out in enumerate(output_tuple): dst_stages = self.act_send_info[idx] for dst in dst_stages: if dst is None: continue logger.debug('%s Sending tensor to Stage %s: %s', self.log_prefix, dst, out.size()) peer_rank = self.stage_index_to_group_rank[dst] peer_global_rank = peer_rank if self.group is None else dist.get_global_rank(self.group, peer_rank) ops.append(dist.P2POp(dist.isend, out, peer_global_rank, self.group)) return ops", - "docstring": "Get the activation send ops for current stage's forward.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", - "ast_data": "FunctionDef name:get_fwd_send_ops arguments arg:self arg:fwd_chunk_id type:int Assign For Call call:enumerate Assign For If Compare op:Is Assign Assign Return return:yes" - }, - { - "library": "pandas", - "name": "nanvar", - "source_code": "@disallow('M8', 'm8') @bottleneck_switch(ddof = 1) def nanvar(values: np.ndarray, *, axis: AxisInt | None = None, skipna: bool = True, ddof: int = 1, mask = None): dtype = values.dtype mask = _maybe_get_mask(values, skipna, mask) if dtype.kind in 'iu': values = values.astype('f8') if mask is not None: values[mask] = np.nan if values.dtype.kind = = 'f': count, d = _get_counts_nanvar(values.shape, mask, axis, ddof, values.dtype) else: count, d = _get_counts_nanvar(values.shape, mask, axis, ddof) if skipna and mask is not None: values = values.copy() np.putmask(values, mask, 0) avg = _ensure_numeric(values.sum(axis = axis, dtype = np.float64)) / count if axis is not None: avg = np.expand_dims(avg, axis) sqr = _ensure_numeric((avg - values) ** 2) if mask is not None: np.putmask(sqr, mask, 0) result = sqr.sum(axis = axis, dtype = np.float64) / d if dtype.kind = = 'f': result = result.astype(dtype, copy = False) return result", - "docstring": "Compute the variance along given axis while ignoring NaNs Parameters ---------- values : ndarray axis : int, optional skipna : bool, default True ddof : int, default 1 Delta Degrees of Freedom. The divisor used in calculations is N - ddof, where N represents the number of elements. mask : ndarray[bool], optional nan-mask if known Returns ------- result : float Unless input is a float array, in which case use the same precision as the input array. 
Examples -------- >>> from pandas.core import nanops >>> s = pd.Series([1, np.nan, 2, 3]) >>> nanops.nanvar(s.values) 1.0", - "type": "function", - "file_path": "pandas\\pandas\\core\\nanops.py", - "ast_data": "FunctionDef name:nanvar arguments arg:values type:np.ndarray Call call:disallow Call call:bottleneck_switch Assign Assign Call call:_maybe_get_mask If Compare op:In Assign Call call:astype If Compare op:IsNot Assign If Compare op:Eq Assign Call call:_get_counts_nanvar Assign Call call:_get_counts_nanvar If BoolOp Compare op:IsNot Assign Call call:copy Assign If Compare op:IsNot Assign Call call:expand_dims Assign Call call:_ensure_numeric If Compare op:IsNot Assign If Compare op:Eq Assign Call call:astype Return return:yes" - }, - { - "library": "pytorch", - "name": "BNReLU3d", - "source_code": "class BNReLU3d(_FusedModule): def __init__(self, batch_norm, relu): assert type_before_parametrizations(batch_norm) = = BatchNorm3d and type_before_parametrizations(relu) = = ReLU, f'Incorrect types for input modules{type_before_parametrizations(batch_norm)}{type_before_parametrizations(relu)}' super().__init__(batch_norm, relu)", - "docstring": "This is a sequential container which calls the BatchNorm 3d and ReLU modules. During quantization this will be replaced with the corresponding fused module.", - "type": "class", - "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\modules\\fused.py", - "ast_data": "ClassDef name:BNReLU3d FunctionDef name:__init__ arguments arg:self arg:batch_norm arg:relu" - }, - { - "library": "algorithms", - "name": "find_all_cliques", - "source_code": "def find_all_cliques(edges): def expand_clique(candidates, nays): nonlocal compsub if not candidates and (not nays): nonlocal solutions solutions.append(compsub.copy()) else: for selected in candidates.copy(): candidates.remove(selected) candidates_temp = get_connected(selected, candidates) nays_temp = get_connected(selected, nays) compsub.append(selected) expand_clique(candidates_temp, nays_temp) nays.add(compsub.pop()) def get_connected(vertex, old_set): new_set = set() for neighbor in edges[str(vertex)]: if neighbor in old_set: new_set.add(neighbor) return new_set compsub = [] solutions = [] possibles = set(edges.keys()) expand_clique(possibles, set()) return solutions", - "docstring": "takes dict of sets each key is a vertex value is set of all edges connected to vertex returns list of lists (each sub list is a maximal clique) implementation of the basic algorithm described in: Bron, Coen; Kerbosch, Joep (1973), \"Algorithm 457: finding all cliques of an undirected graph\",", - "type": "function", - "file_path": "algorithms\\algorithms\\graph\\find_all_cliques.py", - "ast_data": "FunctionDef name:find_all_cliques arguments arg:edges FunctionDef name:expand_clique arguments arg:candidates arg:nays If BoolOp For Call call:copy Assign Call call:get_connected Assign Call call:get_connected FunctionDef name:get_connected arguments arg:vertex arg:old_set Assign Call call:set For If Compare op:In Return return:yes Assign Assign Assign Call call:set Return return:yes" - }, - { - "library": "pytorch", - "name": "from_builtin_function", - "source_code": "@classmethod def from_builtin_function(cls, builtin_function: types.BuiltinFunctionType) -> OpName: op = builtin_function.__name__ module = builtin_function.__module__ return cls.from_qualified_name(module + ': : ' + op)", - "docstring": "From a builtin function, e.g. operator.add, math.ceil, etc, get the OpName. 
FX graph uses built-in functions to caculate sympy expression. This function is used to get the OpName from a builtin function. Args: builtin_function (types.BuiltinFunctionType): operator.add, math.ceil, etc. Returns: OpName: _description_", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\registration.py", - "ast_data": "FunctionDef name:from_builtin_function arguments arg:cls arg:builtin_function type:types.BuiltinFunctionType Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_data", - "source_code": "def set_data(self, t, f1, f2, *, where = None): t, f1, f2 = self.axes._fill_between_process_units(self.t_direction, self._f_direction, t, f1, f2) verts = self._make_verts(t, f1, f2, where) self.set_verts(verts)", - "docstring": "Set new values for the two bounding curves. Parameters ---------- t : array-like The ``. Note that this definition implies that an isolated *True* value between two *False* values in *where* will not result in filling. Both sides of the *True* position remain unfilled due to the adjacent *False* values. See Also -------- .PolyCollection.set_verts, .Line2D.set_data", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "FunctionDef name:set_data arguments arg:self arg:t arg:f1 arg:f2 Assign Call call:_fill_between_process_units Assign Call call:_make_verts" - }, - { - "library": "pytorch", - "name": "get_element", - "source_code": "def get_element(root_dict: STATE_DICT_TYPE, path: OBJ_PATH, default_value: Optional[T] = None) -> Optional[T]: cur_value = cast(CONTAINER_TYPE, root_dict) for part in path: if type(part) is int: if not isinstance(cur_value, list) or len(cur_value) < part: return default_value elif not isinstance(cur_value, Mapping) or part not in cur_value: return default_value cur_value = cast(CONTAINER_TYPE, cur_value[part]) return cast(Optional[T], cur_value)", - "docstring": "Retrieve the value at `` if not found.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\_traverse.py", - "ast_data": "FunctionDef name:get_element arguments arg:root_dict type:STATE_DICT_TYPE arg:path type:OBJ_PATH arg:default_value type:Optional[T] Assign Call call:cast For If Compare op:Is If BoolOp Compare op:Lt Return return:yes If BoolOp Compare op:NotIn Return return:yes Assign Call call:cast Return return:yes" - }, - { - "library": "tensorflow", - "name": "local_conv2d", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format = None): return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)", - "docstring": "Apply 2D conv with un-shared weights. Args: inputs: 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'. kernel: the unshared weight for convolution, with shape (output_items, feature_dim, filters). kernel_size: a tuple of 2 integers, specifying the width and height of the 2D convolution window. strides: a tuple of 2 integers, specifying the strides of the convolution along the width and height. output_shape: a tuple with (output_row, output_col). data_format: the data format, channels_first or channels_last. 
Returns: A 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:local_conv2d arguments arg:inputs arg:kernel arg:kernel_size arg:strides arg:output_shape arg:data_format Return return:yes" - }, - { - "library": "tensorflow", - "name": "append", - "source_code": "def append(self, line, font_attr_segs = None): self._lines.append(line) if font_attr_segs: self._font_attr_segs[len(self._lines) - 1] = font_attr_segs", - "docstring": "Append a single line of text. Args: line: (str) The text to be added to the end. font_attr_segs: (list of tuples) Font attribute segments of the appended line.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:append arguments arg:self arg:line arg:font_attr_segs If Assign" - }, - { - "library": "authlib", - "name": "sign", - "source_code": "def sign(self, msg, key): raise NotImplementedError", - "docstring": "Sign the text msg with a private/sign key. :param msg: message bytes to be signed :param key: private key to sign the message :return: bytes", - "type": "method", - "file_path": "authlib\\authlib\\jose\\rfc7515\\models.py", - "ast_data": "FunctionDef name:sign arguments arg:self arg:msg arg:key Raise raises:NotImplementedError" - }, - { - "library": "django", - "name": "is_ignorable_request", - "source_code": "def is_ignorable_request(self, request, uri, domain, referer): if not referer: return True if settings.APPEND_SLASH and uri.endswith('/') and (referer = = uri[: -1]): return True if not self.is_internal_request(domain, referer) and '?' 
in referer: return True parsed_referer = urlsplit(referer) if parsed_referer.netloc in ['', domain] and parsed_referer.path = = uri: return True return any((pattern.search(uri) for pattern in settings.IGNORABLE_404_URLS))", - "docstring": "Return True if the given request *shouldn't* notify the site managers according to project settings or in situations outlined by the inline comments.", - "type": "method", - "file_path": "django\\django\\middleware\\common.py", - "ast_data": "FunctionDef name:is_ignorable_request arguments arg:self arg:request arg:uri arg:domain arg:referer If Return return:yes If BoolOp Call call:endswith Compare op:Eq Return return:yes If BoolOp Compare op:In Return return:yes Assign Call call:urlsplit If BoolOp Compare op:In Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "trace_with_input_signature", - "source_code": "def trace_with_input_signature(self): if None not in nest.flatten(self._input_signature) and self._has_kwargs: self.add_trace(*self._input_signature)", - "docstring": "Trace with the layer/models inferred input signature if possible.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save_impl.py", - "ast_data": "FunctionDef name:trace_with_input_signature arguments arg:self If BoolOp Compare op:NotIn" - }, - { - "library": "matplotlib", - "name": "bubble", - "source_code": "def bubble(self, a): if a not in self._axes: raise ValueError('Axes has not been added yet') self._axes[a] = next(self._counter)", - "docstring": "Move an Axes, which must already exist in the stack, to the top.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:bubble arguments arg:self arg:a If Compare op:NotIn Raise raises:ValueError('Axes has not been added yet') Assign Call call:next" - }, - { - "library": "pytorch", - "name": "collect_callgrind", - "source_code": "def collect_callgrind(self, task_spec: common.TaskSpec, globals: dict[str, Any], *, number: int, repeats: int, collect_baseline: bool, is_python: bool, retain_out_file: bool) -> tuple[CallgrindStats, ...]: self._validate() assert is_python or not collect_baseline *task_stats, baseline_stats = self._invoke(task_spec = task_spec, globals = globals, number = number, repeats = repeats, collect_baseline = collect_baseline, is_python = is_python, retain_out_file = retain_out_file) assert len(task_stats) = = repeats return tuple((CallgrindStats(task_spec = task_spec, number_per_run = number, built_with_debug_symbols = self._build_type = = 'RelWithDebInfo', baseline_inclusive_stats = baseline_stats[0], baseline_exclusive_stats = baseline_stats[1], stmt_inclusive_stats = stmt_inclusive_stats, stmt_exclusive_stats = stmt_exclusive_stats, stmt_callgrind_out = out_contents) for stmt_inclusive_stats, stmt_exclusive_stats, out_contents in task_stats))", - "docstring": "Collect stats, and attach a reference run which can be used to filter interpreter overhead.", - "type": "method", - "file_path": "pytorch\\torch\\utils\\benchmark\\utils\\valgrind_wrapper\\timer_interface.py", - "ast_data": "FunctionDef name:collect_callgrind arguments arg:self arg:task_spec type:common.TaskSpec arg:globals type:dict[str, Any] Assign Call call:_invoke Return return:yes" - }, - { - "library": "pandas", - "name": "agg_series", - "source_code": "@final def agg_series(self, obj: Series, func: Callable, preserve_dtype: bool = False) -> ArrayLike: if not isinstance(obj._values, np.ndarray): 
preserve_dtype = True result = self._aggregate_series_pure_python(obj, func) npvalues = lib.maybe_convert_objects(result, try_float = False) if preserve_dtype: out = maybe_cast_pointwise_result(npvalues, obj.dtype, numeric_only = True) else: out = npvalues return out", - "docstring": "Parameters ---------- obj : Series func : function taking a Series and returning a scalar-like preserve_dtype : bool Whether the aggregation is known to be dtype-preserving. Returns ------- np.ndarray or ExtensionArray", - "type": "method", - "file_path": "pandas\\pandas\\core\\groupby\\ops.py", - "ast_data": "FunctionDef name:agg_series arguments arg:self arg:obj type:Series arg:func type:Callable arg:preserve_dtype type:bool If Assign Assign Call call:_aggregate_series_pure_python Assign Call call:maybe_convert_objects If Assign Call call:maybe_cast_pointwise_result Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_supertype_of", - "source_code": "def is_supertype_of(self, other: 'FunctionType') -> bool: if len(self.parameters) ! = len(other.parameters): return False for self_param, other_param in zip(self.parameters.values(), other.parameters.values()): if not self_param.is_subtype_of(other_param): return False if not all((name in other.captures for name in self.captures)): return False return all((capture_type.is_subtype_of(other.captures[name]) for name, capture_type in self.captures.items()))", - "docstring": "Returns True if self is a supertype of other FunctionType.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", - "ast_data": "FunctionDef name:is_supertype_of arguments arg:self arg:other type:'FunctionType' If Compare op:NotEq Return return:yes For Call call:zip If Return return:yes If Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "__get__", - "source_code": "def __get__(self, instance, cls = None): if instance is None: return self try: rel_obj = self.field.get_cached_value(instance) except KeyError: has_value = None not in self.field.get_local_related_value(instance) ancestor_link = instance._meta.get_ancestor_link(self.field.model) if has_value else None if ancestor_link and ancestor_link.is_cached(instance): ancestor = ancestor_link.get_cached_value(instance) rel_obj = self.field.get_cached_value(ancestor, default = None) else: rel_obj = None if rel_obj is None and has_value: rel_obj = self.get_object(instance) remote_field = self.field.remote_field if not remote_field.multiple: remote_field.set_cached_value(rel_obj, instance) self.field.set_cached_value(instance, rel_obj) if rel_obj is None and (not self.field.null): raise self.RelatedObjectDoesNotExist('%s has no %s.' % (self.field.model.__name__, self.field.name)) else: return rel_obj", - "docstring": "Get the related instance through the forward relation. With the example above, when getting `` class (we don't need it)", - "type": "method", - "file_path": "django\\django\\db\\models\\fields\\related_descriptors.py", - "ast_data": "FunctionDef name:__get__ arguments arg:self arg:instance arg:cls If Compare op:Is Return return:yes Try Assign Call call:get_cached_value ExceptHandler Assign Compare op:NotIn Assign If BoolOp Call call:is_cached Assign Call call:get_cached_value Assign Call call:get_cached_value Assign If BoolOp Compare op:Is Assign Call call:get_object Assign If If BoolOp Compare op:Is Raise raises:self.RelatedObjectDoesNotExist('%s has no %s.' 
% (self.field.model.__name__, self.field.name)) Return return:yes" - }, - { - "library": "pandas", - "name": "__getitem__", - "source_code": "def __getitem__(self, key: PositionalIndexer2D) -> Self | DTScalarOrNaT: result = cast('Union[Self, DTScalarOrNaT]', super().__getitem__(key)) if lib.is_scalar(result): return result else: result = cast(Self, result) result._freq = self._get_getitem_freq(key) return result", - "docstring": "This getitem defers to the underlying array, which by-definition can only handle list-likes, slices, and integer scalars", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", - "ast_data": "FunctionDef name:__getitem__ arguments arg:self arg:key type:PositionalIndexer2D Assign Call call:cast If Call call:is_scalar Return return:yes Assign Call call:cast Assign Call call:_get_getitem_freq Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, axis = None, use_rmin = True, *, apply_theta_transforms = True, scale_transform = None): super().__init__() self._axis = axis self._use_rmin = use_rmin self._apply_theta_transforms = apply_theta_transforms self._scale_transform = scale_transform if apply_theta_transforms: _apply_theta_transforms_warn()", - "docstring": "Parameters ---------- axis : , optional Axis associated with this transform. This is used to get the minimum radial limit. use_rmin : , optional If ``, subtract the minimum radial axis limit before transforming to Cartesian coordinates. *axis* must also be specified for this to take effect.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:axis arg:use_rmin Assign Assign Assign Assign If" - }, - { - "library": "pandas", - "name": "round", - "source_code": "def round(self, decimals: int = 0) -> Self: return self._constructor(self.to_series().round(decimals))", - "docstring": "Round each value in the Index to the given number of decimals. Parameters ---------- decimals : int, optional Number of decimal places to round to. If decimals is negative, it specifies the number of positions to the left of the decimal point. Returns ------- Index A new Index with the rounded values. Examples -------- >>> import pandas as pd >>> idx = pd.Index([10.1234, 20.5678, 30.9123, 40.4567, 50.7890]) >>> idx.round(decimals=2) Index([10.12, 20.57, 30.91, 40.46, 50.79], dtype='float64')", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\base.py", - "ast_data": "FunctionDef name:round arguments arg:self arg:decimals type:int Return return:yes" - }, - { - "library": "cherrypy", - "name": "process_urlencoded", - "source_code": "def process_urlencoded(entity): qs = entity.fp.read() for charset in entity.attempt_charsets: try: params = {} for aparam in qs.split(b'&'): for pair in aparam.split(b';'): if not pair: continue atoms = pair.split(b' = ', 1) if len(atoms) = = 1: atoms.append(b'') key = unquote_plus(atoms[0]).decode(charset) value = unquote_plus(atoms[1]).decode(charset) if key in params: if not isinstance(params[key], list): params[key] = [params[key]] params[key].append(value) else: params[key] = value except UnicodeDecodeError: pass else: entity.charset = charset break else: raise cherrypy.HTTPError(400, 'The request entity could not be decoded. 
The following charsets were attempted: %s' % repr(entity.attempt_charsets)) for key, value in params.items(): if key in entity.params: if not isinstance(entity.params[key], list): entity.params[key] = [entity.params[key]] entity.params[key].append(value) else: entity.params[key] = value", - "docstring": "Read application/x-www-form-urlencoded data into entity.params.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", - "ast_data": "FunctionDef name:process_urlencoded arguments arg:entity Assign Call call:read For Try Assign For Call call:split For Call call:split If Assign Call call:split If Compare op:Eq Assign Call call:decode Assign Call call:decode If Compare op:In If Assign Assign ExceptHandler Assign Raise raises:cherrypy.HTTPError(400, 'The request entity could not be decoded. The following charsets were attempted: %s' % repr(entity.attempt_charsets)) For Call call:items If Compare op:In If Assign Assign" - }, - { - "library": "mongo", - "name": "CommandLogger", - "source_code": "class CommandLogger(monitoring.CommandListener): def started(self, event: monitoring.CommandStartedEvent) -> None: logging.info(f'Command {event.command_name} with request id {event.request_id} started on server {event.connection_id}') def succeeded(self, event: monitoring.CommandSucceededEvent) -> None: logging.info(f'Command {event.command_name} with request id {event.request_id} on server {event.connection_id} succeeded in {event.duration_micros} microseconds') def failed(self, event: monitoring.CommandFailedEvent) -> None: logging.info(f'Command {event.command_name} with request id {event.request_id} on server {event.connection_id} failed in {event.duration_micros} microseconds')", - "docstring": "A simple listener that logs command events. Listens for :class:, :class: and :class: events and logs them at the severity level using :mod:. .. versionadded:: 3.11", - "type": "class", - "file_path": "mongo\\pymongo\\event_loggers.py", - "ast_data": "ClassDef name:CommandLogger FunctionDef name:started arguments arg:self arg:event type:monitoring.CommandStartedEvent FunctionDef name:succeeded arguments arg:self arg:event type:monitoring.CommandSucceededEvent FunctionDef name:failed arguments arg:self arg:event type:monitoring.CommandFailedEvent" - }, - { - "library": "matplotlib", - "name": "get_figure", - "source_code": "def get_figure(self, root = None): if self._root_figure is self: return self if self._parent is self._root_figure: return self._parent if root is None: message = 'From Matplotlib 3.12 SubFigure.get_figure will by default return the direct parent figure, which may be a SubFigure. To suppress this warning, pass the root parameter. Pass `True` to maintain the old behavior and `False` to opt-in to the future behavior.' _api.warn_deprecated('3.10', message = message) root = True if root: return self._root_figure return self._parent", - "docstring": "Return the or instance the (Sub)Figure belongs to. Parameters ---------- root : bool, default=True If False, return the (Sub)Figure this artist is on. If True, return the root Figure for a nested tree of SubFigures. .. 
deprecated:: 3.10 From version 3.12 *root* will default to False.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:get_figure arguments arg:self arg:root If Compare op:Is Return return:yes If Compare op:Is Return return:yes If Compare op:Is Assign Assign If Return return:yes Return return:yes" - }, - { - "library": "authlib", - "name": "query_token", - "source_code": "def query_token(self, token_string, token_type_hint): raise NotImplementedError()", - "docstring": "Get the token from database/storage by the given token string. Developers should implement this method:: def query_token(self, token_string, token_type_hint): if token_type_hint == 'access_token': return Token.query_by_access_token(token_string) if token_type_hint == 'refresh_token': return Token.query_by_refresh_token(token_string) return Token.query_by_access_token(token_string) or Token.query_by_refresh_token(token_string)", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc7009\\revocation.py", - "ast_data": "FunctionDef name:query_token arguments arg:self arg:token_string arg:token_type_hint Raise raises:NotImplementedError()" - }, - { - "library": "kornia", - "name": "vee", - "source_code": "@staticmethod def vee(omega: Tensor) -> Tensor: check_se2_omega_shape(omega) upsilon = omega[..., 2, : 2] theta = So2.vee(omega[..., : 2, : 2]) return concatenate((upsilon, theta[..., None]), -1)", - "docstring": "Convert elements from lie algebra to vector space. Args: omega: 3x3-matrix representing lie algebra of shape :math:. Returns: vector of shape :math:. Example: >>> v = torch.ones(3) >>> omega_hat = Se2.hat(v) >>> Se2.vee(omega_hat) tensor([1., 1., 1.])", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", - "ast_data": "FunctionDef name:vee arguments arg:omega type:Tensor Assign Assign Call call:vee Return return:yes" - }, - { - "library": "tensorflow", - "name": "merge_from", - "source_code": "def merge_from(self, dev): self.job, self.replica, self.task, self.device_type, self.device_index = self._get_combined_properties(dev)", - "docstring": "Merge the properties of \"dev\" into this . Note: Will be removed in TensorFlow 2.x since DeviceSpecs will become immutable. Args: dev: a .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\device_spec.py", - "ast_data": "FunctionDef name:merge_from arguments arg:self arg:dev Assign Call call:_get_combined_properties" - }, - { - "library": "scipy", - "name": "integrate", - "source_code": "def integrate(self, t, step = False, relax = False): if step and self._integrator.supports_step: mth = self._integrator.step elif relax and self._integrator.supports_run_relax: mth = self._integrator.run_relax else: mth = self._integrator.run try: self._y, self.t = mth(self.f, self.jac or (lambda: None), self._y, self.t, t, self.f_params, self.jac_params) except SystemError as e: raise ValueError('Function to integrate must not return a tuple.') from e return self._y", - "docstring": "Find y=y(t), set y as an initial condition, and return y. Parameters ---------- t : float The endpoint of the integration step. step : bool If True, and if the integrator supports the step method, then perform a single integration step and return. This parameter is provided in order to expose internals of the implementation, and should not be changed from its default value in most cases. 
relax : bool If True and if the integrator supports the run_relax method, then integrate until t_1 >= t and return. ``. This parameter is provided in order to expose internals of the implementation, and should not be changed from its default value in most cases. Returns ------- y : float The integrated value at t", - "type": "method", - "file_path": "scipy\\scipy\\integrate\\_ode.py", - "ast_data": "FunctionDef name:integrate arguments arg:self arg:t arg:step arg:relax If BoolOp Assign If BoolOp Assign Assign Try Assign Call call:mth ExceptHandler Raise raises:ValueError('Function to integrate must not return a tuple.') Return return:yes" - }, - { - "library": "matplotlib", - "name": "push", - "source_code": "def push(self, o): self._elements[self._pos + 1:] = [o] self._pos = len(self._elements) - 1 return o", - "docstring": "Push *o* to the stack after the current position, and return *o*. Discard all later elements.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", - "ast_data": "FunctionDef name:push arguments arg:self arg:o Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "MaxSizePartitioner", - "source_code": "@tf_export('distribute.experimental.partitioners.MaxSizePartitioner', v1 = []) class MaxSizePartitioner(Partitioner): def __init__(self, max_shard_bytes, max_shards = None, bytes_per_string = 16): if max_shard_bytes < 1: raise ValueError(f'Argument `max_shard_bytes` must be positive. Received {max_shard_bytes}') if max_shards and max_shards < 1: raise ValueError(f'Argument `max_shards` must be positive. Received {max_shards}') if bytes_per_string < 1: raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}') self._max_shard_bytes = max_shard_bytes self._max_shards = max_shards self._bytes_per_string = bytes_per_string def __call__(self, shape, dtype, axis = 0): return partitioned_variables.variable_axis_size_partitioner(max_shard_bytes = self._max_shard_bytes, max_shards = self._max_shards, bytes_per_string_element = self._bytes_per_string, axis = axis)(shape, dtype)", - "docstring": "Partitioner that keeps shards below . This partitioner ensures each shard has at most , and tries to allocate as few shards as possible, i.e., keeping shard size as large as possible. If the partitioner hits the limit, then each shard may end up larger than . By default equals and no limit on the number of shards is enforced. Examples: >>> partitioner = MaxSizePartitioner(max_shard_bytes=4) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [6, 1] >>> partitioner = MaxSizePartitioner(max_shard_bytes=4, max_shards=2) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [2, 1] >>> partitioner = MaxSizePartitioner(max_shard_bytes=1024) >>> partitions = partitioner(tf.TensorShape([6, 1]), tf.float32) >>> [1, 1] >>> >>> # use in ParameterServerStrategy >>> # strategy = tf.distribute.experimental.ParameterServerStrategy( >>> # cluster_resolver=cluster_resolver, variable_partitioner=partitioner)", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", - "ast_data": "ClassDef name:MaxSizePartitioner Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:max_shard_bytes arg:max_shards arg:bytes_per_string If Compare op:Lt Raise raises:ValueError(f'Argument `max_shard_bytes` must be positive. Received {max_shard_bytes}') If BoolOp Compare op:Lt Raise raises:ValueError(f'Argument `max_shards` must be positive. 
Received {max_shards}') If Compare op:Lt Raise raises:ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}') Assign Assign Assign FunctionDef name:__call__ arguments arg:self arg:shape arg:dtype arg:axis Return return:yes" - }, - { - "library": "authlib", - "name": "authenticate_token_endpoint_client", - "source_code": "def authenticate_token_endpoint_client(self): client = self.server.authenticate_client(self.request, self.TOKEN_ENDPOINT_AUTH_METHODS) self.server.send_signal('after_authenticate_client', client = client, grant = self) return client", - "docstring": "Authenticate client with the given methods for token endpoint. For example, the client makes the following HTTP request using TLS: .. code-block:: http POST /token HTTP/1.1 Host: server.example.com Authorization: Basic czZCaGRSa3F0MzpnWDFmQmF0M2JW Content-Type: application/x-www-form-urlencoded grant_type=authorization_code&code=SplxlOBeZQQYbYS6WxSbIA &redirect_uri=https%3A%2F%2Fclient%2Eexample%2Ecom%2Fcb Default available methods are: \"none\", \"client_secret_basic\" and \"client_secret_post\". :return: client", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\grants\\base.py", - "ast_data": "FunctionDef name:authenticate_token_endpoint_client arguments arg:self Assign Call call:authenticate_client Return return:yes" - }, - { - "library": "mongo", - "name": "find_one", - "source_code": "def find_one(self, filter: Optional[Any] = None, session: Optional[ClientSession] = None, *args: Any, **kwargs: Any) -> Optional[GridOut]: if filter is not None and (not isinstance(filter, abc.Mapping)): filter = {'_id': filter} _disallow_transactions(session) for f in self.find(filter, *args, session = session, **kwargs): return f return None", - "docstring": "Get a single file from gridfs. All arguments to :meth: are also valid arguments for :meth:, although any argument will be ignored. Returns a single :class:, or `find~pymongo.client_session.ClientSessionfind` parameter.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:find_one arguments arg:self arg:filter type:Optional[Any] arg:session type:Optional[ClientSession] vararg:args kwarg:kwargs If BoolOp Compare op:IsNot Assign For Call call:find Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "factorize_from_iterable", - "source_code": "def factorize_from_iterable(values) -> tuple[np.ndarray, Index]: from pandas import CategoricalIndex if not is_list_like(values): raise TypeError('Input must be list-like') categories: Index vdtype = getattr(values, 'dtype', None) if isinstance(vdtype, CategoricalDtype): values = extract_array(values) cat_codes = np.arange(len(values.categories), dtype = values.codes.dtype) cat = Categorical.from_codes(cat_codes, dtype = values.dtype, validate = False) categories = CategoricalIndex(cat) codes = values.codes else: cat = Categorical(values, ordered = False) categories = cat.categories codes = cat.codes return (codes, categories)", - "docstring": "Factorize an input into and . Preserves categorical dtype in . 
Parameters ---------- values : list-like Returns ------- codes : ndarray categories : Index If has a categorical dtype, then is a CategoricalIndex keeping the categories and order of .", - "type": "function", - "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", - "ast_data": "FunctionDef name:factorize_from_iterable arguments arg:values If Raise raises:TypeError('Input must be list-like') Assign Call call:getattr If Call call:isinstance Assign Call call:extract_array Assign Call call:arange Assign Call call:from_codes Assign Call call:CategoricalIndex Assign Assign Call call:Categorical Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "from_extents", - "source_code": "@staticmethod def from_extents(*args, minpos = None): bbox = Bbox(np.reshape(args, (2, 2))) if minpos is not None: bbox._minpos[:] = minpos return bbox", - "docstring": "Create a new Bbox from *left*, *bottom*, *right* and *top*. The *y*-axis increases upwards. Parameters ---------- left, bottom, right, top : float The four extents of the bounding box. minpos : float or None If this is supplied, the Bbox will have a minimum positive value set. This is useful when dealing with logarithmic scales and other scales where negative bounds result in floating point errors.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:from_extents arguments vararg:args Assign Call call:Bbox If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "assign_on_each_device", - "source_code": "def assign_on_each_device(var, assign_func, value, read_value): if var._packed_variable is not None: update = control_flow_ops.group(tuple((assign_func(d, var._packed_variable, value) for d in var._devices))) else: update = control_flow_ops.group(tuple((assign_func(v.device, v, value) for v in var._values))) if not read_value: return update with ops.control_dependencies([update] if update else []): return var.read_value()", - "docstring": "Update the variable on each replica with the given assign_func and value.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\values_util.py", - "ast_data": "FunctionDef name:assign_on_each_device arguments arg:var arg:assign_func arg:value arg:read_value If Compare op:IsNot Assign Call call:group Assign Call call:group If Return return:yes With Return return:yes" - }, - { - "library": "pytorch", - "name": "DonatedBuffer", - "source_code": "class DonatedBuffer(InputBuffer): pass", - "docstring": "Represents a donated buffer which is a saved tensor that is not alias to any fwd inputs, fwd user outputs, and bwd outputs. We generally cannot inplace reuse the input tensor memory during backward since it might be used in another function. 
However, donated buffer can be inplace reused during backward to save memory.", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "ClassDef name:DonatedBuffer" - }, - { - "library": "salmon", - "name": "deliver", - "source_code": "def deliver(self, message): if self.RELOAD: self.reload() called_count = 0 for func, matchkw in self._collect_matches(message): LOG.debug('Matched %r against %s.', message.To, func.__name__) if salmon_setting(func, 'nolocking'): self.call_safely(func, message, matchkw) else: with self.call_lock: self.call_safely(func, message, matchkw) called_count + = 1 if called_count = = 0: self._enqueue_undeliverable(message)", - "docstring": "The meat of the whole Salmon operation, this method takes all the arguments given, and then goes through the routing listing to figure out which state handlers should get the gear. The routing operates on a simple set of rules: 1) Match on all functions that match the given To in their registered format pattern. 2) Call all @stateless state handlers functions. 3) Call the first method that's in the right state for the From/To. It will log which handlers are being run, and you can use the 'salmon route' command to inspect and debug routing problems. If you have an ERROR state function, then when your state blows up, it will transition to ERROR state and call your function right away. It will then stay in the ERROR state unless you return a different one.", - "type": "method", - "file_path": "salmon\\salmon\\routing.py", - "ast_data": "FunctionDef name:deliver arguments arg:self arg:message If Assign For Call call:_collect_matches If Call call:salmon_setting With If Compare op:Eq" - }, - { - "library": "kornia", - "name": "unproject_meshgrid", - "source_code": "def unproject_meshgrid(height: int, width: int, camera_matrix: Tensor, normalize_points: bool = False, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> Tensor: KORNIA_CHECK_SHAPE(camera_matrix, ['*', '3', '3']) points_uv: Tensor = create_meshgrid(height, width, normalized_coordinates = False, device = device, dtype = dtype).squeeze() camera_matrix_tmp: Tensor = camera_matrix[:, None, None] points_xy = normalize_points_with_intrinsics(points_uv, camera_matrix_tmp) points_xyz = convert_points_to_homogeneous(points_xy) if normalize_points: points_xyz = kornia_ops.normalize(points_xyz, dim = -1, p = 2) return points_xyz", - "docstring": "Compute a 3d point per pixel given its depth value and the camera intrinsics. .. tip:: This function should be used in conjunction with :py:func: to cache the meshgrid computation when warping multiple frames with the same camera intrinsics. Args: height: height of image. width: width of image. camera_matrix: tensor containing the camera intrinsics with shape :math:. normalize_points: whether to normalize the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. device: device to place the result on. dtype: dtype of the result. 
Return: tensor with a 3d point per pixel of the same resolution as the input :math:.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\depth.py", - "ast_data": "FunctionDef name:unproject_meshgrid arguments arg:height type:int arg:width type:int arg:camera_matrix type:Tensor arg:normalize_points type:bool arg:device type:Optional[torch.device] arg:dtype type:Optional[torch.dtype] Assign Call call:normalize_points_with_intrinsics Assign Call call:convert_points_to_homogeneous If Assign Call call:normalize Return return:yes" - }, - { - "library": "mongo", - "name": "database_name", - "source_code": "@property def database_name(self) -> str: return super().database_name", - "docstring": "The name of the database this command was run against.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:database_name arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "field_admin_ordering", - "source_code": "def field_admin_ordering(self, field, request, model_admin): try: related_admin = model_admin.admin_site.get_model_admin(field.remote_field.model) except NotRegistered: return () else: return related_admin.get_ordering(request)", - "docstring": "Return the model admin's ordering for related field, if provided.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\filters.py", - "ast_data": "FunctionDef name:field_admin_ordering arguments arg:self arg:field arg:request arg:model_admin Try Assign Call call:get_model_admin ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "disable_constraint_checking", - "source_code": "def disable_constraint_checking(self): with self.cursor() as cursor: cursor.execute('SET foreign_key_checks = 0') return True", - "docstring": "Disable foreign key checks, primarily for use in adding rows with forward references. Always return True to indicate constraint checks need to be re-enabled.", - "type": "method", - "file_path": "django\\django\\db\\backends\\mysql\\base.py", - "ast_data": "FunctionDef name:disable_constraint_checking arguments arg:self With Return return:yes" - }, - { - "library": "pytorch", - "name": "FlattenOutputStep", - "source_code": "class FlattenOutputStep(OutputAdaptStep): def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None = None) -> Sequence[Any]: return pytree.tree_leaves(model_outputs)", - "docstring": "Flatten nested collection types and return a flat list of elements. ONNX can't represent collection types (e.g., dictionary, tuple of tuple of tensor, etc). NOTE: Ideally we would want to use `SpecTree` can be validate for new model outputs. However, this is not possible currently because we never have access to real PyTorch model outputs during export. Only traced outputs may be available, but they are not an accurate reflection of the original PyTorch model outputs format as they are typically in their own unique format, depending on the tracing strategy.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", - "ast_data": "ClassDef name:FlattenOutputStep FunctionDef name:apply arguments arg:self arg:model_outputs type:Any arg:model type:torch.nn.Module | Callable | torch_export.ExportedProgram | None Return return:yes" - }, - { - "library": "scipy", - "name": "matvec", - "source_code": "def matvec(self, x): x = np.asanyarray(x) M, N = self.shape if x.shape ! = (N,) and x.shape ! 
= (N, 1): raise ValueError('dimension mismatch') y = self._matvec(x) if isinstance(x, np.matrix): y = asmatrix(y) else: y = np.asarray(y) if x.ndim = = 1: y = y.reshape(M) elif x.ndim = = 2: y = y.reshape(M, 1) else: raise ValueError('invalid shape returned by user-defined matvec()') return y", - "docstring": "Matrix-vector multiplication. Performs the operation y=A@x where A is an MxN linear operator and x is a column vector or 1-d array. Parameters ---------- x : {matrix, ndarray} An array with shape (N,) or (N,1). Returns ------- y : {matrix, ndarray} A matrix or ndarray with shape (M,) or (M,1) depending on the type and shape of the x argument. Notes ----- This matvec wraps the user-specified matvec routine or overridden _matvec method to ensure that y has the correct shape and type.", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py", - "ast_data": "FunctionDef name:matvec arguments arg:self arg:x Assign Call call:asanyarray Assign If BoolOp Compare op:NotEq Compare op:NotEq Raise raises:ValueError('dimension mismatch') Assign Call call:_matvec If Call call:isinstance Assign Call call:asmatrix Assign Call call:asarray If Compare op:Eq Assign Call call:reshape If Compare op:Eq Assign Call call:reshape Raise raises:ValueError('invalid shape returned by user-defined matvec()') Return return:yes" - }, - { - "library": "numpy", - "name": "tolist", - "source_code": "def tolist(self): _mask = self._mask if _mask is nomask: return self._data.tolist() result = [] for d, m in zip(self._data, self._mask): if m: result.append(None) else: result.append(d.item()) return tuple(result)", - "docstring": "Transforms the mvoid object into a tuple. Masked fields are replaced by None. Returns ------- returned_tuple Tuple of fields", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:tolist arguments arg:self Assign If Compare op:Is Return return:yes Assign For Call call:zip If Return return:yes" - }, - { - "library": "tensorflow", - "name": "encode", - "source_code": "def encode(self, spec, value, minimum_rank = 0): return spec._to_components(value)", - "docstring": "Encodes as a nest of batchable Tensors or CompositeTensors. The default definition returns a flat tuple of all the s, s, and s from a depth-first traversal of 's fields. Subclasses may override this default definition, when necessary. Args: spec: The TypeSpec of the value to encode. value: A value compatible with . minimum_rank: The minimum rank for the returned Tensors, CompositeTensors, and ExtensionType values. This can be used to ensure that the encoded values can be unbatched this number of times. If , then must be compatible for all values returned by . Returns: A nest (as defined by ) of s, batchable s, or s. 
Stacking, unstacking, or concatenating these encoded values and then decoding the result must be equivalent to stacking, unstacking, or concatenating the original values.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", - "ast_data": "FunctionDef name:encode arguments arg:self arg:spec arg:value arg:minimum_rank Return return:yes" - }, - { - "library": "pytorch", - "name": "reduce_scatter_tensor", - "source_code": "def reduce_scatter_tensor(self: torch.Tensor, reduceOp: str, scatter_dim: int, group: RANK_TYPES, tag: str = ''): group_name = _resolve_group_name(group, tag) group_size = c10d._get_group_size_by_name(group_name) assert self.size(scatter_dim) % group_size = = 0, f'input dimension 0 ({self.size(0)} must be a multiple of group_size {group_size})' if scatter_dim ! = 0: tensor_list = torch.chunk(self, group_size, dim = scatter_dim) self = torch.cat(tensor_list) tensor = torch.ops._c10d_functional.reduce_scatter_tensor(self, reduceOp.lower(), group_size, group_name) res = _maybe_wrap_tensor(tensor) return res", - "docstring": "Reduces the tensor data across all machines in such a way that all get the final result, then scatter the results to corresponding ranks. The input tensor is left unmodified. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one dimension of the DeviceMesh :: N.B. If you pass a PG or a 1D list to perform a MPMD collective, the compiler won't be able to recover that information and perform collective algebraic optimization. 
Use other forms of input for that.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", - "ast_data": "FunctionDef name:reduce_scatter_tensor arguments arg:self type:torch.Tensor arg:reduceOp type:str arg:scatter_dim type:int arg:group type:RANK_TYPES arg:tag type:str Assign Call call:_resolve_group_name Assign Call call:_get_group_size_by_name If Compare op:NotEq Assign Call call:chunk Assign Call call:cat Assign Call call:reduce_scatter_tensor Assign Call call:_maybe_wrap_tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "placeholder_arguments", - "source_code": "def placeholder_arguments(self, placeholder_context: trace.PlaceholderContext) -> inspect.BoundArguments: arguments = collections.OrderedDict() for parameter in self.parameters.values(): if parameter.kind in {Parameter.VAR_POSITIONAL, Parameter.VAR_KEYWORD}: raise ValueError('Can not generate placeholder values for variable length function type.') if not parameter.type_constraint: raise ValueError('Can not generate placeholder value for partially defined function type.') placeholder_context.update_naming_scope(parameter.name) arguments[parameter.name] = parameter.type_constraint.placeholder_value(placeholder_context) return inspect.BoundArguments(self, arguments)", - "docstring": "Returns BoundArguments of values that can be used for tracing.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", - "ast_data": "FunctionDef name:placeholder_arguments arguments arg:self arg:placeholder_context type:trace.PlaceholderContext Assign Call call:OrderedDict For Call call:values If Compare op:In Raise raises:ValueError('Can not generate placeholder values for variable length function type.') If Raise raises:ValueError('Can not generate placeholder value for partially defined function type.') Assign Call call:placeholder_value Return return:yes" - }, - { - "library": "tensorflow", - "name": "nth_element", - "source_code": "def nth_element(input, n, reverse = False, name = None): return gen_nn_ops.nth_element(input, n, reverse = reverse, name = name)", - "docstring": "Finds values of the -th smallest value for the last dimension. Note that n is zero-indexed. If the input is a vector (rank-1), finds the entries which is the nth-smallest value in the vector and outputs their values as scalar tensor. For matrices (resp. higher rank input), computes the entries which is the nth-smallest value in each row (resp. vector along the last dimension). Thus, values.shape = input.shape[:-1] Args: input: 1-D or higher with last dimension at least . n: A of type . 0-D. Position of sorted vector to select along the last dimension (along each row for matrices). Valid range of n is reverse: An optional . Defaults to . When set to True, find the nth-largest value in the vector and vice versa. name: A name for the operation (optional). Returns: A . Has the same type as . 
The -th order statistic along each last dimensional slice.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\nn_ops.py", - "ast_data": "FunctionDef name:nth_element arguments arg:input arg:n arg:reverse arg:name Return return:yes" - }, - { - "library": "pytorch", - "name": "register_state_dict_pre_hook", - "source_code": "def register_state_dict_pre_hook(self, hook: Callable[['Optimizer'], None], prepend: bool = False) -> RemovableHandle: handle = hooks.RemovableHandle(self._optimizer_state_dict_pre_hooks) self._optimizer_state_dict_pre_hooks[handle.id] = hook if prepend: self._optimizer_state_dict_pre_hooks.move_to_end(handle.id, last = False) return handle", - "docstring": "Register a state dict pre-hook which will be called before :meth: is called. It should have the following signature:: hook(optimizer) -> None The `torch.utils.hooks.RemoveableHandle`", - "type": "method", - "file_path": "pytorch\\torch\\optim\\optimizer.py", - "ast_data": "FunctionDef name:register_state_dict_pre_hook arguments arg:self arg:hook type:Callable[['Optimizer'], None] arg:prepend type:bool Assign Call call:RemovableHandle Assign If Return return:yes" - }, - { - "library": "mongo", - "name": "add_update", - "source_code": "def add_update(self, selector: Mapping[str, Any], update: Union[Mapping[str, Any], _Pipeline], multi: bool, upsert: Optional[bool], collation: Optional[Mapping[str, Any]] = None, array_filters: Optional[list[Mapping[str, Any]]] = None, hint: Union[str, dict[str, Any], None] = None, sort: Optional[Mapping[str, Any]] = None) -> None: validate_ok_for_update(update) cmd: dict[str, Any] = {'q': selector, 'u': update, 'multi': multi} if upsert is not None: cmd['upsert'] = upsert if collation is not None: self.uses_collation = True cmd['collation'] = collation if array_filters is not None: self.uses_array_filters = True cmd['arrayFilters'] = array_filters if hint is not None: self.uses_hint_update = True cmd['hint'] = hint if sort is not None: self.uses_sort = True cmd['sort'] = sort if multi: self.is_retryable = False self.ops.append((_UPDATE, cmd))", - "docstring": "Create an update document and add it to the list of ops.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\bulk.py", - "ast_data": "FunctionDef name:add_update arguments arg:self arg:selector type:Mapping[str, Any] arg:update type:Union[Mapping[str, Any], _Pipeline] arg:multi type:bool arg:upsert type:Optional[bool] arg:collation type:Optional[Mapping[str, Any]] arg:array_filters type:Optional[list[Mapping[str, Any]]] arg:hint type:Union[str, dict[str, Any], None] arg:sort type:Optional[Mapping[str, Any]] If Compare op:IsNot Assign If Compare op:IsNot Assign Assign If Compare op:IsNot Assign Assign If Compare op:IsNot Assign Assign If Compare op:IsNot Assign Assign If Assign" - }, - { - "library": "tensorflow", - "name": "validate_args", - "source_code": "@property def validate_args(self): return self._validate_args", - "docstring": "Returns True if Tensor arguments will be validated.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\bijector_impl.py", - "ast_data": "FunctionDef name:validate_args arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "is_batch_stride_largest", - "source_code": "def is_batch_stride_largest(mat1, mat2, layout) -> bool: sizes = [mat1.get_size(), mat2.get_size(), layout.size] strides = [mat1.get_stride(), mat2.get_stride(), layout.stride] for size, stride in zip(sizes, strides): assert 
len(size) = = len(stride) = = 3, 'Expect 3D tensors' if stride[0] ! = sympy_product(size[1:]): return False return True", - "docstring": "Checking if the batch stride is the largest in the stride.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\kernel\\mm_common.py", - "ast_data": "FunctionDef name:is_batch_stride_largest arguments arg:mat1 arg:mat2 arg:layout Assign Assign For Call call:zip If Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "register_dataclass", - "source_code": "def register_dataclass(cls: type[Any], *, serialized_type_name: Optional[str] = None) -> None: pytree.register_dataclass(cls, serialized_type_name = serialized_type_name)", - "docstring": "Registers a dataclass as a valid input/output type for :func:. Args: cls: the dataclass type to register serialized_type_name: The serialized name for the dataclass. This is required if you want to serialize the pytree TreeSpec containing this dataclass. Example:: import torch from dataclasses import dataclass @dataclass class InputDataClass: feature: torch.Tensor bias: int @dataclass class OutputDataClass: res: torch.Tensor torch.export.register_dataclass(InputDataClass) torch.export.register_dataclass(OutputDataClass) class Mod(torch.nn.Module): def forward(self, x: InputDataClass) -> OutputDataClass: res = x.feature + x.bias return OutputDataClass(res=res) ep = torch.export.export(Mod(), (InputDataClass(torch.ones(2, 2), 1), )) print(ep)", - "type": "function", - "file_path": "pytorch\\torch\\export\\__init__.py", - "ast_data": "FunctionDef name:register_dataclass arguments arg:cls type:type[Any]" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, bysecond = None, interval = 1, tz = None): if bysecond is None: bysecond = range(60) rule = rrulewrapper(SECONDLY, bysecond = bysecond, interval = interval) super().__init__(rule, tz = tz)", - "docstring": "Parameters ---------- bysecond : int or list of int, default: all seconds Ticks will be placed on every second in *bysecond*. Default is `~datetime.tzinfotimezonedateutil.tz`.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\dates.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:bysecond arg:interval arg:tz If Compare op:Is Assign Call call:range Assign Call call:rrulewrapper" - }, - { - "library": "matplotlib", - "name": "points_to_pixels", - "source_code": "def points_to_pixels(self, points): return points", - "docstring": "Convert points to display units. You need to override this function (unless your backend doesn't have a dpi, e.g., postscript or svg). 
Some imaging systems assume some value for pixels per inch:: points to pixels = points * pixels_per_inch/72 * dpi/72 Parameters ---------- points : float or array-like Returns ------- Points converted to pixels", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:points_to_pixels arguments arg:self arg:points Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, statistics: calib_stats_pb2.CalibrationStatistics, calib_opts: stablehlo_quant_config_pb2.CalibrationOptions): super().__init__(statistics, calib_opts) hist_stats = statistics.histogram_statistics self._bin_width = hist_stats.bin_width self._lower_bound = hist_stats.lower_bound self._hist_freq = np.array(hist_stats.hist_freq) self._num_bins = len(self._hist_freq) self._num_bits = 8 first_mid = self._lower_bound + self._bin_width / 2 last_mid = first_mid + (self._num_bins - 1) * self._bin_width self._hist_mids = np.linspace(first_mid, last_mid, self._num_bins)", - "docstring": "Builds histogram using statistics.histogram_statistics. lower_bound hist_mid v v |=========|=========|=========|=========|=========| bin width Args: statistics: Collected calibration statistics. calib_opts: Calibration options used for calculating min and max.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\calibrator\\calibration_algorithm.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:statistics type:calib_stats_pb2.CalibrationStatistics arg:calib_opts type:stablehlo_quant_config_pb2.CalibrationOptions Assign Assign Assign Assign Call call:array Assign Call call:len Assign Assign Assign Assign Call call:linspace" - }, - { - "library": "pytorch", - "name": "codegen_broadcast_and_reshape", - "source_code": "def codegen_broadcast_and_reshape(self, value: str, initial_shape: Sequence[sympy.Expr], final_shape: Sequence[sympy.Expr], allow_implicit: bool) -> str: pre_broadcast_shape = [sympy.S.One if is_broadcasting else dim for dim, is_broadcasting in zip(self.broadcast_shape, self.broadcasting_dims)] value = triton_reshape(value, initial_shape, pre_broadcast_shape) sizevars = V.graph.sizevars supports_implicit_broadcast = allow_implicit and (len(pre_broadcast_shape) = = len(final_shape) and all((sizevars.statically_known_equals(pre_dim, 1) or sizevars.statically_known_equals(pre_dim, post_dim) for pre_dim, post_dim in zip(pre_broadcast_shape, final_shape)))) if any(self.broadcasting_dims) and (not supports_implicit_broadcast): value = f'tl.broadcast_to({value}, {V.kernel.index_to_str(self.broadcast_shape)})' value = triton_reshape(value, self.broadcast_shape, final_shape) return value", - "docstring": "Generate a broadcast and a reshape for the block pointer. 
This restores stride-0 dimensions which were removed from the block pointer.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\triton.py", - "ast_data": "FunctionDef name:codegen_broadcast_and_reshape arguments arg:self arg:value type:str arg:initial_shape type:Sequence[sympy.Expr] arg:final_shape type:Sequence[sympy.Expr] arg:allow_implicit type:bool Assign Assign Call call:triton_reshape Assign Assign BoolOp BoolOp Compare op:Eq Call call:all If BoolOp Call call:any Assign Assign Call call:triton_reshape Return return:yes" - }, - { - "library": "algorithms", - "name": "euler_totient", - "source_code": "def euler_totient(n): result = n for i in range(2, int(n ** 0.5) + 1): if n % i = = 0: while n % i = = 0: n // = i result - = result // i if n > 1: result - = result // n return result", - "docstring": "Euler's totient function or Phi function. Time Complexity: O(sqrt(n)).", - "type": "function", - "file_path": "algorithms\\algorithms\\maths\\find_primitive_root_simple.py", - "ast_data": "FunctionDef name:euler_totient arguments arg:n Assign For Call call:range If Compare op:Eq While Compare op:Eq If Compare op:Gt Return return:yes" - }, - { - "library": "pytorch", - "name": "BNReLU2d", - "source_code": "class BNReLU2d(nnq.BatchNorm2d): _FLOAT_MODULE = torch.ao.nn.intrinsic.BNReLU2d def __init__(self, num_features, eps = 1e-05, momentum = 0.1, device = None, dtype = None): super().__init__(num_features, eps = eps, momentum = momentum, device = device, dtype = dtype) def forward(self, input): if len(input.shape) ! = 4: raise ValueError('Input shape must be `(N, C, H, W)`!') return torch.ops.quantized.batch_norm2d_relu(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.scale, self.zero_point) def _get_name(self): return 'QuantizedBNReLU2d' @classmethod def from_float(cls, mod, use_precomputed_fake_quant = False): return super().from_float(mod, use_precomputed_fake_quant = use_precomputed_fake_quant) @classmethod def from_reference(cls, bn_relu, output_scale, output_zero_point): return super().from_reference(bn_relu[0], output_scale, output_zero_point)", - "docstring": "A BNReLU2d module is a fused module of BatchNorm2d and ReLU We adopt the same interface as :class:. 
Attributes: Same as torch.ao.nn.quantized.BatchNorm2d", - "type": "class", - "file_path": "pytorch\\torch\\ao\\nn\\intrinsic\\quantized\\modules\\bn_relu.py", - "ast_data": "ClassDef name:BNReLU2d Assign FunctionDef name:__init__ arguments arg:self arg:num_features arg:eps arg:momentum arg:device arg:dtype FunctionDef name:forward arguments arg:self arg:input If Compare op:NotEq Raise raises:ValueError('Input shape must be `(N, C, H, W)`!') Return return:yes FunctionDef name:_get_name arguments arg:self Return return:yes FunctionDef name:from_float arguments arg:cls arg:mod arg:use_precomputed_fake_quant Return return:yes FunctionDef name:from_reference arguments arg:cls arg:bn_relu arg:output_scale arg:output_zero_point Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, graph, fetches, feeds, feed_handles = None): with graph.as_default(): self._fetch_mapper = _FetchMapper.for_fetch(fetches) self._fetches = [] self._targets = [] self._feeds = feeds self._feed_handles = feed_handles or {} self._ops = [] self._fetch_handles = {} for fetch in self._fetch_mapper.unique_fetches(): if isinstance(fetch, ops.Operation): self._assert_fetchable(graph, fetch) self._targets.append(fetch) self._ops.append(True) else: self._assert_fetchable(graph, fetch.op) self._fetches.append(fetch) self._ops.append(False) if isinstance(fetch, tensor.Tensor) and (fetch.op.type = = 'GetSessionHandle' or fetch.op.type = = 'GetSessionHandleV2'): self._fetch_handles[fetch.ref()] = fetch.op.inputs[0].dtype self._final_fetches = [x for x in self._fetches if x.ref() not in feeds]", - "docstring": "Creates a fetch handler. Args: graph: Graph of the fetches. Used to check for fetchability and to convert all fetches to tensors or ops as needed. fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple, or dict. feeds: A feed dict where keys are Tensors. 
feed_handles: A dict from feed Tensors to TensorHandle objects used as direct feeds.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:graph arg:fetches arg:feeds arg:feed_handles With Assign Call call:for_fetch Assign Assign Assign Assign BoolOp Assign Assign For Call call:unique_fetches If Call call:isinstance If BoolOp Call call:isinstance BoolOp Compare op:Eq Compare op:Eq Assign Assign" - }, - { - "library": "pandas", - "name": "apply_empty_result", - "source_code": "def apply_empty_result(self): assert callable(self.func) if self.result_type not in ['reduce', None]: return self.obj.copy() should_reduce = self.result_type = = 'reduce' from pandas import Series if not should_reduce: try: if self.axis = = 0: r = self.func(Series([], dtype = np.float64), *self.args, **self.kwargs) else: r = self.func(Series(index = self.columns, dtype = np.float64), *self.args, **self.kwargs) except Exception: pass else: should_reduce = not isinstance(r, Series) if should_reduce: if len(self.agg_axis): r = self.func(Series([], dtype = np.float64), *self.args, **self.kwargs) else: r = np.nan return self.obj._constructor_sliced(r, index = self.agg_axis) else: return self.obj.copy()", - "docstring": "we have an empty result; at least 1 axis is 0 we will try to apply the function to an empty series in order to see if this is a reduction function", - "type": "method", - "file_path": "pandas\\pandas\\core\\apply.py", - "ast_data": "FunctionDef name:apply_empty_result arguments arg:self If Compare op:NotIn Return return:yes Assign Compare op:Eq If Try If Compare op:Eq Assign Call call:func Assign Call call:func ExceptHandler Assign If If Call call:len Assign Call call:func Assign Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_adjustable", - "source_code": "def get_adjustable(self): return self._adjustable", - "docstring": "Return whether the Axes will adjust its physical dimension ('box') or its data limits ('datalim') to achieve the desired aspect ratio. See Also -------- matplotlib.axes.Axes.set_adjustable Set how the Axes adjusts to achieve the required aspect ratio. matplotlib.axes.Axes.set_aspect For a description of aspect handling.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:get_adjustable arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "OnnxBackend", - "source_code": "class OnnxBackend(enum.Enum): REFERENCE = 'ONNXReferenceEvaluator' ONNX_RUNTIME_CPU = 'CPUExecutionProvider' ONNX_RUNTIME_CUDA = 'CUDAExecutionProvider'", - "docstring": "Enum class for ONNX backend used for export verification. .. deprecated:: 2.7 Consider using `` to test the ONNX model.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\verification.py", - "ast_data": "ClassDef name:OnnxBackend Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "pipeline", - "source_code": "def pipeline(module: torch.nn.Module, mb_args: tuple[Any, ...], mb_kwargs: Optional[dict[str, Any]] = None, split_spec: Optional[dict[str, SplitPoint]] = None, split_policy: Optional[Callable[[fx.GraphModule], fx.GraphModule]] = None) -> Pipe: if split_spec is not None and split_policy is not None: raise ValueError('Cannot specify both `split_spec` and `split_policy`. 
Please use only one of them.') if split_spec is not None: annotate_split_points(module, split_spec) return Pipe.from_tracing(mod = module, example_args = mb_args, example_kwargs = mb_kwargs) else: return Pipe.from_tracing(mod = module, example_args = mb_args, example_kwargs = mb_kwargs, split_policy = split_policy)", - "docstring": "Split a module based on a specification. See for more details. Arguments --------- module: The module to be splitted. mb_args: Example positional inputs, in micro-batch form. mb_kwargs: Example keyword inputs, in micro-batch form. (default: ) split_spec: A dictionary using submodule names as split marker. (default: ) split_policy: The policy to use for splitting the module. (default: ) Returns ------- A pipeline representation of class .", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\pipelining\\_IR.py", - "ast_data": "FunctionDef name:pipeline arguments arg:module type:torch.nn.Module arg:mb_args type:tuple[Any, ...] arg:mb_kwargs type:Optional[dict[str, Any]] arg:split_spec type:Optional[dict[str, SplitPoint]] arg:split_policy type:Optional[Callable[[fx.GraphModule], fx.GraphModule]] If BoolOp Compare op:IsNot Compare op:IsNot Raise raises:ValueError('Cannot specify both `split_spec` and `split_policy`. Please use only one of them.') If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "deprecated_internal_learning_phase_scope", - "source_code": "@tf_contextlib.contextmanager def deprecated_internal_learning_phase_scope(value): global _GRAPH_LEARNING_PHASES if value not in {0, 1}: raise ValueError('Expected learning phase to be 0 or 1.') with ops.init_scope(): if context.executing_eagerly(): previous_eager_value = _GRAPH_LEARNING_PHASES.get(_DUMMY_EAGER_GRAPH.key, None) previous_graph_value = _GRAPH_LEARNING_PHASES.get(get_graph(), None) learning_phase_previously_set = _DUMMY_EAGER_GRAPH.learning_phase_is_set try: deprecated_internal_set_learning_phase(value) yield finally: if not learning_phase_previously_set: _DUMMY_EAGER_GRAPH.learning_phase_is_set = False with ops.init_scope(): if context.executing_eagerly(): if previous_eager_value is not None: _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] = previous_eager_value elif _DUMMY_EAGER_GRAPH.key in _GRAPH_LEARNING_PHASES: del _GRAPH_LEARNING_PHASES[_DUMMY_EAGER_GRAPH.key] graph = get_graph() if previous_graph_value is not None: _GRAPH_LEARNING_PHASES[graph] = previous_graph_value elif graph in _GRAPH_LEARNING_PHASES: del _GRAPH_LEARNING_PHASES[graph]", - "docstring": "An internal-only version of . Unlike the public method, this method does not raise a deprecation warning. This is needed because saved model saving needs to set learning phase to maintain compatibility with code that sets/gets the learning phase, but saved model saving itself shouldn't raise a deprecation warning. We can get rid of this method and its usages when the public API is removed. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. 
Raises: ValueError: if is neither nor .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:deprecated_internal_learning_phase_scope arguments arg:value If Compare op:NotIn Raise raises:ValueError('Expected learning phase to be 0 or 1.') With If Call call:executing_eagerly Assign Call call:get Assign Call call:get Assign Try If Assign With If Call call:executing_eagerly If Compare op:IsNot Assign If Compare op:In Assign Call call:get_graph If Compare op:IsNot Assign If Compare op:In" - }, - { - "library": "tensorflow", - "name": "local_conv1d", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def local_conv1d(inputs, kernel, kernel_size, strides, data_format = None): output_shape = (kernel.shape[0],) return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)", - "docstring": "Apply 1D conv with un-shared weights. Args: inputs: 3D tensor with shape: (batch_size, steps, input_dim) if data_format is \"channels_last\" or (batch_size, input_dim, steps) if data_format is \"channels_first\". kernel: the unshared weight for convolution, with shape (output_length, feature_dim, filters). kernel_size: a tuple of a single integer, specifying the length of the 1D convolution window. strides: a tuple of a single integer, specifying the stride length of the convolution. data_format: the data format, channels_first or channels_last. Returns: A 3d tensor with shape: (batch_size, output_length, filters) if data_format='channels_first' or 3D tensor with shape: (batch_size, filters, output_length) if data_format='channels_last'.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:local_conv1d arguments arg:inputs arg:kernel arg:kernel_size arg:strides arg:data_format Assign Return return:yes" - }, - { - "library": "mongo", - "name": "is_primary", - "source_code": "@property def is_primary(self) -> bool: return self._server_property('is_writable')", - "docstring": "If this client is connected to a server that can accept writes. True if the current server is a standalone, mongos, or the primary of a replica set. If the client is not connected, this will block until a connection is established or raise ServerSelectionTimeoutError if no server is available.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\mongo_client.py", - "ast_data": "FunctionDef name:is_primary arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "integrate", - "source_code": "def integrate(self, t, step = False, relax = False): y = ode.integrate(self, t, step, relax) return y[: : 2] + 1j * y[1: : 2]", - "docstring": "Find y=y(t), set y as an initial condition, and return y. Parameters ---------- t : float The endpoint of the integration step. step : bool If True, and if the integrator supports the step method, then perform a single integration step and return. This parameter is provided in order to expose internals of the implementation, and should not be changed from its default value in most cases. relax : bool If True and if the integrator supports the run_relax method, then integrate until t_1 >= t and return. ``. This parameter is provided in order to expose internals of the implementation, and should not be changed from its default value in most cases. 
Returns ------- y : float The integrated value at t", - "type": "method", - "file_path": "scipy\\scipy\\integrate\\_ode.py", - "ast_data": "FunctionDef name:integrate arguments arg:self arg:t arg:step arg:relax Assign Call call:integrate Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_axislabel_direction", - "source_code": "def set_axislabel_direction(self, label_direction): self._axislabel_add_angle = _api.check_getitem({'+': 0, '-': 180}, label_direction = label_direction)", - "docstring": "Adjust the direction of the axis label. Note that the *label_direction*\\s '+' and '-' are relative to the direction of the increasing coordinate. Parameters ---------- label_direction : {\"+\", \"-\"}", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py", - "ast_data": "FunctionDef name:set_axislabel_direction arguments arg:self arg:label_direction Assign Call call:check_getitem" - }, - { - "library": "mongo", - "name": "load_verify_locations", - "source_code": "def load_verify_locations(self, cafile: Optional[str] = None, capath: Optional[str] = None) -> None: self._ctx.load_verify_locations(cafile, capath) if not hasattr(_SSL.Connection, 'get_verified_chain'): assert cafile is not None self._callback_data.trusted_ca_certs = _load_trusted_ca_certs(cafile)", - "docstring": "Load a set of \"certification authority\"(CA) certificates used to validate other peers' certificates when is other than ssl.CERT_NONE.", - "type": "method", - "file_path": "mongo\\pymongo\\pyopenssl_context.py", - "ast_data": "FunctionDef name:load_verify_locations arguments arg:self arg:cafile type:Optional[str] arg:capath type:Optional[str] If Assign Call call:_load_trusted_ca_certs" - }, - { - "library": "pytorch", - "name": "increment_step", - "source_code": "@classmethod def increment_step(cls, requester: str) -> int: if requester not in cls._step_dict: cls.init_step_count(requester) cls._step_dict[requester] + = 1 new_step = max(cls._step_dict.values()) if new_step > cls._current_step: delta = new_step - cls._current_step if delta > 1: warn(f'Profiler step count has increased more than 1 - current_step = {cls._current_step} step dict = {cls._step_dict}') for _ in range(0, delta): _kineto_step() cls._current_step = new_step return cls._current_step", - "docstring": "Increments the step count for the requester. Additionally if the max over all step counts has incremented then trigger the _kineto_step() returns global step count", - "type": "method", - "file_path": "pytorch\\torch\\autograd\\profiler.py", - "ast_data": "FunctionDef name:increment_step arguments arg:cls arg:requester type:str If Compare op:NotIn Assign Call call:max If Compare op:Gt Assign If Compare op:Gt For Call call:range Assign Return return:yes" - }, - { - "library": "mongo", - "name": "is_valid", - "source_code": "@classmethod def is_valid(cls: Type[ObjectId], oid: Any) -> bool: if not oid: return False try: ObjectId(oid) return True except (InvalidId, TypeError): return False", - "docstring": "Checks if a string is valid or not. :param oid: the object id to validate .. versionadded:: 2.3", - "type": "method", - "file_path": "mongo\\bson\\objectid.py", - "ast_data": "FunctionDef name:is_valid arguments arg:cls type:Type[ObjectId] arg:oid type:Any If Return return:yes Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "scipy", - "name": "hadamard", - "source_code": "def hadamard(n, dtype = int): if n < 1: lg2 = 0 else: lg2 = int(math.log(n, 2)) if 2 ** lg2 ! 
= n: raise ValueError('n must be an positive integer, and n must be a power of 2') H = np.array([[1]], dtype = dtype) for i in range(0, lg2): H = np.vstack((np.hstack((H, H)), np.hstack((H, -H)))) return H", - "docstring": "Construct an Hadamard matrix. Constructs an n-by-n Hadamard matrix, using Sylvester's construction. must be a power of 2. Parameters ---------- n : int The order of the matrix. must be a power of 2. dtype : dtype, optional The data type of the array to be constructed. Returns ------- H : (n, n) ndarray The Hadamard matrix. Notes ----- .. versionadded:: 0.8.0 Examples -------- >>> from scipy.linalg import hadamard >>> hadamard(2, dtype=complex) array([[ 1.+0.j, 1.+0.j], [ 1.+0.j, -1.-0.j]]) >>> hadamard(4) array([[ 1, 1, 1, 1], [ 1, -1, 1, -1], [ 1, 1, -1, -1], [ 1, -1, -1, 1]])", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_special_matrices.py", - "ast_data": "FunctionDef name:hadamard arguments arg:n arg:dtype If Compare op:Lt Assign Assign Call call:int If Compare op:NotEq Raise raises:ValueError('n must be an positive integer, and n must be a power of 2') Assign Call call:array For Call call:range Assign Call call:vstack Return return:yes" - }, - { - "library": "pytorch", - "name": "call_module", - "source_code": "def call_module(self, m: Module, forward: Callable, args: tuple[object, ...], kwargs: dict[str, object]) -> None: from torch._dynamo import OptimizedModule if isinstance(m, (OptimizedModule, GraphModule)): return forward(*args, **kwargs) try: return Tracer.call_module(self, m, forward, args, kwargs) except _ModuleNotInstalledAsSubmoduleError: log.debug('Unable to find the path of the module %s. This might be because the module was not properly registered as a submodule, which is not good practice. We will trace through the module without recording stack information.', str(m)) return forward(*args, **kwargs)", - "docstring": "PythonKeyTracer overrides call_module to avoid the scope handling, but we actually want it.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\proxy_tensor.py", - "ast_data": "FunctionDef name:call_module arguments arg:self arg:m type:Module arg:forward type:Callable arg:args type:tuple[object, ...] arg:kwargs type:dict[str, object] If Call call:isinstance Return return:yes Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "scrapy", - "name": "CSVFeedSpider", - "source_code": "class CSVFeedSpider(Spider): delimiter: str | None = None quotechar: str | None = None headers: list[str] | None = None def process_results(self, response: Response, results: Iterable[Any]) -> Iterable[Any]: return results def adapt_response(self, response: Response) -> Response: return response def parse_row(self, response: Response, row: dict[str, str]) -> Any: raise NotImplementedError def parse_rows(self, response: Response) -> Any: for row in csviter(response, self.delimiter, self.headers, quotechar = self.quotechar): ret = iterate_spider_output(self.parse_row(response, row)) yield from self.process_results(response, ret) def _parse(self, response: Response, **kwargs: Any) -> Any: if not hasattr(self, 'parse_row'): raise NotConfigured('You must define parse_row method in order to scrape this CSV feed') response = self.adapt_response(response) return self.parse_rows(response)", - "docstring": "Spider for parsing CSV feeds. It receives a CSV file in a response; iterates through each of its rows, and calls parse_row with a dict containing each field's data. 
You can set some options regarding the CSV file, such as the delimiter, quotechar and the file's headers.", - "type": "class", - "file_path": "scrapy\\scrapy\\spiders\\feed.py", - "ast_data": "ClassDef name:CSVFeedSpider FunctionDef name:process_results arguments arg:self arg:response type:Response arg:results type:Iterable[Any] Return return:yes FunctionDef name:adapt_response arguments arg:self arg:response type:Response Return return:yes FunctionDef name:parse_row arguments arg:self arg:response type:Response arg:row type:dict[str, str] Raise raises:NotImplementedError FunctionDef name:parse_rows arguments arg:self arg:response type:Response For Call call:csviter Assign Call call:iterate_spider_output FunctionDef name:_parse arguments arg:self arg:response type:Response kwarg:kwargs If Raise raises:NotConfigured('You must define parse_row method in order to scrape this CSV feed') Assign Call call:adapt_response Return return:yes" - }, - { - "library": "tensorflow", - "name": "enable_strict_mode", - "source_code": "@tf_export('experimental.enable_strict_mode') def enable_strict_mode(): global STRICT_MODE STRICT_MODE = True", - "docstring": "If called, enables strict mode for all behaviors. Used to switch all deprecation warnings to raise errors instead.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\strict_mode.py", - "ast_data": "FunctionDef name:enable_strict_mode arguments Call call:tf_export Assign" - }, - { - "library": "pytorch", - "name": "export_chrome_trace", - "source_code": "def export_chrome_trace(self, path): import os device_name = 'cuda' if not self._use_device else self._use_device with open(path, 'w') as f: next_id = 0 f.write('[') for evt in self: if evt.trace_name is None: continue f.write('{{\"name\": \"{}\", \"ph\": \"X\", \"ts\": {}, \"dur\": {}, \"tid\": {}, \"pid\": \"CPU functions\", \"args\": {{}}}}, '.format(evt.trace_name, evt.time_range.start, evt.time_range.elapsed_us(), evt.thread if not evt.is_remote else f'\" node_id: {evt.node_id}, thread_id: {evt.thread} \"')) for _ in evt.kernels: f.write(f'{{\"name\": \"{evt.trace_name}\", \"ph\": \"s\", \"ts\": {evt.time_range.start}, \"tid\": {evt.thread}, \"pid\": \"CPU functions\", \"id\": {next_id}, \"cat\": \"cpu_to_{device_name}\", \"args\": {{}}}}, ') next_id + = 1 if len(self) > 0: f.seek(f.tell() - 2, os.SEEK_SET) f.truncate() f.write(']')", - "docstring": "Export an EventList as a Chrome tracing tools file. The checkpoint can be later loaded and inspected under `` URL. Args: path (str): Path where the trace will be written.", - "type": "method", - "file_path": "pytorch\\torch\\autograd\\profiler_util.py", - "ast_data": "FunctionDef name:export_chrome_trace arguments arg:self arg:path Assign With Assign For If Compare op:Is For If Compare op:Gt" - }, - { - "library": "pytorch", - "name": "dtrace_structured", - "source_code": "def dtrace_structured(name: str, metadata_fn: Callable[[], Union[dict[str, Any], tuple[str, int]]] = dict, *, payload_fn: Callable[[], Optional[Union[str, object]]] = lambda: None, suppress_context: bool = False, expect_trace_id: bool = False, record_logging_overhead: bool = True): if GET_DTRACE_STRUCTURED: trace_structured(name, metadata_fn, payload_fn = payload_fn, suppress_context = suppress_context, expect_trace_id = expect_trace_id, record_logging_overhead = record_logging_overhead)", - "docstring": "For logging more detailed information used for debugging. 
This may result in the program becoming slow.", - "type": "function", - "file_path": "pytorch\\torch\\_logging\\_internal.py", - "ast_data": "FunctionDef name:dtrace_structured arguments arg:name type:str arg:metadata_fn type:Callable[[], Union[dict[str, Any], tuple[str, int]]] If" - }, - { - "library": "tensorflow", - "name": "on_predict_begin", - "source_code": "def on_predict_begin(self, logs = None): logs = self._process_logs(logs) for callback in self.callbacks: callback.on_predict_begin(logs)", - "docstring": "Calls the 'on_predict_begin` methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "FunctionDef name:on_predict_begin arguments arg:self arg:logs Assign Call call:_process_logs For" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, norm = None, cmap = None, *, colorizer = None, **kwargs): super().__init__(**kwargs) self._A = None self._colorizer = self._get_colorizer(colorizer = colorizer, norm = norm, cmap = cmap) self.colorbar = None self._id_colorizer = self._colorizer.callbacks.connect('changed', self.changed) self.callbacks = cbook.CallbackRegistry(signals = ['changed'])", - "docstring": "Parameters ---------- norm : (or subclass thereof) or str or None The normalizing object which scales data, typically into the interval `str.Normalize~matplotlib.colors.Colormap` The colormap used to map normalized data values to RGBA colors.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colorizer.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:norm arg:cmap kwarg:kwargs Assign Assign Call call:_get_colorizer Assign Assign Call call:connect Assign Call call:CallbackRegistry" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, bymonth = None, bymonthday = 1, interval = 1, tz = None): if bymonth is None: bymonth = range(1, 13) rule = rrulewrapper(MONTHLY, bymonth = bymonth, bymonthday = bymonthday, interval = interval, **self.hms0d) super().__init__(rule, tz = tz)", - "docstring": "Parameters ---------- bymonth : int or list of int, default: all months Ticks will be placed on every month in *bymonth*. Default is `~datetime.tzinfotimezonedateutil.tz`.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\dates.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:bymonth arg:bymonthday arg:interval arg:tz If Compare op:Is Assign Call call:range Assign Call call:rrulewrapper" - }, - { - "library": "kornia", - "name": "download_image", - "source_code": "def download_image(url: str, save_to: str) -> None: im = Image.open(requests.get(url, stream = True, timeout = 30).raw) im.save(save_to)", - "docstring": "Download an image from a given URL and save it to a specified file path. Args: url: The URL of the image to download. 
save_to: The file path where the downloaded image will be saved.", - "type": "function", - "file_path": "kornia\\kornia\\utils\\sample.py", - "ast_data": "FunctionDef name:download_image arguments arg:url type:str arg:save_to type:str Assign Call call:open" - }, - { - "library": "scikit-learn", - "name": "fit_transform", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit_transform(self, X, y = None, init = None): if self.n_init = = 'warn': warnings.warn('The default value of `n_init` will change from 4 to 1 in 1.9.', FutureWarning) self._n_init = 4 else: self._n_init = self.n_init X = validate_data(self, X) if X.shape[0] = = X.shape[1] and self.dissimilarity ! = 'precomputed': warnings.warn(\"The MDS API has changed. ``fit`` now constructs a dissimilarity matrix from data. To use a custom dissimilarity matrix, set ``dissimilarity = 'precomputed'``.\") if self.dissimilarity = = 'precomputed': self.dissimilarity_matrix_ = X elif self.dissimilarity = = 'euclidean': self.dissimilarity_matrix_ = euclidean_distances(X) self.embedding_, self.stress_, self.n_iter_ = smacof(self.dissimilarity_matrix_, metric = self.metric, n_components = self.n_components, init = init, n_init = self._n_init, n_jobs = self.n_jobs, max_iter = self.max_iter, verbose = self.verbose, eps = self.eps, random_state = self.random_state, return_n_iter = True, normalized_stress = self.normalized_stress) return self.embedding_", - "docstring": "Fit the data from , and returns the embedded coordinates. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples, n_samples) Input data. If ``, the input should be the dissimilarity matrix. y : Ignored Not used, present for API consistency by convention. init : ndarray of shape (n_samples, n_components), default=None Starting configuration of the embedding to initialize the SMACOF algorithm. By default, the algorithm is initialized with a randomly chosen array. Returns ------- X_new : ndarray of shape (n_samples, n_components) X transformed in the new space.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\manifold\\_mds.py", - "ast_data": "FunctionDef name:fit_transform arguments arg:self arg:X arg:y arg:init Call call:_fit_context If Compare op:Eq Assign Assign Assign Call call:validate_data If BoolOp Compare op:Eq Compare op:NotEq If Compare op:Eq Assign If Compare op:Eq Assign Call call:euclidean_distances Assign Call call:smacof Return return:yes" - }, - { - "library": "pandas", - "name": "sunday_to_monday", - "source_code": "def sunday_to_monday(dt: datetime) -> datetime: if dt.weekday() = = 6: return dt + timedelta(1) return dt", - "docstring": "If holiday falls on Sunday, use day thereafter (Monday) instead.", - "type": "function", - "file_path": "pandas\\pandas\\tseries\\holiday.py", - "ast_data": "FunctionDef name:sunday_to_monday arguments arg:dt type:datetime If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "apply", - "source_code": "def apply(self, model_outputs: Any, model: torch.nn.Module | Callable | torch_export.ExportedProgram | None = None) -> Sequence[Any]: flattened_outputs, spec = pytree.tree_flatten(model_outputs) if self._spec is None: self._spec = spec else: _assert_identical_pytree_spec(self._spec, spec, error_message = 'Model outputs incompatible with the format that was exported. ') return flattened_outputs", - "docstring": "Flatten the model outputs and validate the output. Args: model_outputs: The model outputs to flatten. 
model: The PyTorch model. Returns: flattened_outputs: The flattened model outputs. Raises: ValueError: If the output produced from the current is not identical to the output produced from the first that was passed to this method.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", - "ast_data": "FunctionDef name:apply arguments arg:self arg:model_outputs type:Any arg:model type:torch.nn.Module | Callable | torch_export.ExportedProgram | None Assign Call call:tree_flatten If Compare op:Is Assign Return return:yes" - }, - { - "library": "kornia", - "name": "__init__", - "source_code": "def __init__(self, z: Tensor) -> None: super().__init__() KORNIA_CHECK_IS_TENSOR(z) check_so2_z_shape(z) self._z = Parameter(z)", - "docstring": "Construct the base class. Internally represented by complex number . Args: z: Complex number with the shape of :math: or :math:. Example: >>> real = torch.tensor(1.0) >>> imag = torch.tensor(2.0) >>> So2(torch.complex(real, imag)).z Parameter containing: tensor(1.+2.j, requires_grad=True)", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\so2.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:z type:Tensor Assign Call call:Parameter" - }, - { - "library": "django", - "name": "get_dated_queryset", - "source_code": "def get_dated_queryset(self, **lookup): qs = self.get_queryset().filter(**lookup) date_field = self.get_date_field() allow_future = self.get_allow_future() allow_empty = self.get_allow_empty() paginate_by = self.get_paginate_by(qs) if not allow_future: now = timezone.now() if self.uses_datetime_field else timezone_today() qs = qs.filter(**{'%s__lte' % date_field: now}) if not allow_empty: is_empty = not qs if paginate_by is None else not qs.exists() if is_empty: raise Http404(_('No %(verbose_name_plural)s available') % {'verbose_name_plural': qs.model._meta.verbose_name_plural}) return qs", - "docstring": "Get a queryset properly filtered according to and any extra lookup kwargs.", - "type": "method", - "file_path": "django\\django\\views\\generic\\dates.py", - "ast_data": "FunctionDef name:get_dated_queryset arguments arg:self kwarg:lookup Assign Call call:filter Assign Call call:get_date_field Assign Call call:get_allow_future Assign Call call:get_allow_empty Assign Call call:get_paginate_by If Assign Assign Call call:filter If Assign If Raise raises:Http404(_('No %(verbose_name_plural)s available') % {'verbose_name_plural': qs.model._meta.verbose_name_plural}) Return return:yes" - }, - { - "library": "pytorch", - "name": "set_printoptions", - "source_code": "def set_printoptions(precision = None, threshold = None, edgeitems = None, linewidth = None, profile = None, sci_mode = None): if profile is not None: if profile = = 'default': PRINT_OPTS.precision = 4 PRINT_OPTS.threshold = 1000 PRINT_OPTS.edgeitems = 3 PRINT_OPTS.linewidth = 80 elif profile = = 'short': PRINT_OPTS.precision = 2 PRINT_OPTS.threshold = 1000 PRINT_OPTS.edgeitems = 2 PRINT_OPTS.linewidth = 80 elif profile = = 'full': PRINT_OPTS.precision = 4 PRINT_OPTS.threshold = inf PRINT_OPTS.edgeitems = 3 PRINT_OPTS.linewidth = 80 if precision is not None: PRINT_OPTS.precision = precision if threshold is not None: PRINT_OPTS.threshold = threshold if edgeitems is not None: PRINT_OPTS.edgeitems = edgeitems if linewidth is not None: PRINT_OPTS.linewidth = linewidth PRINT_OPTS.sci_mode = sci_mode", - "docstring": "Set options for printing. 
Items shamelessly taken from NumPy Args: precision: Number of digits of precision for floating point output (default = 4). threshold: Total number of array elements which trigger summarization rather than full (default = 1000). edgeitems: Number of array items in summary at beginning and end of each dimension (default = 3). linewidth: The number of characters per line for the purpose of inserting line breaks (default = 80). Thresholded matrices will ignore this parameter. profile: Sane defaults for pretty printing. Can override with any of the above options. (any one of , , ) sci_mode: Enable (True) or disable (False) scientific notation. If None (default) is specified, the value is defined by . This value is automatically chosen by the framework. Example:: >>> # Limit the precision of elements >>> torch.set_printoptions(precision=2) >>> torch.tensor([1.12345]) tensor([1.12]) >>> # Limit the number of elements shown >>> torch.set_printoptions(threshold=5) >>> torch.arange(10) tensor([0, 1, 2, ..., 7, 8, 9]) >>> # Restore defaults >>> torch.set_printoptions(profile='default') >>> torch.tensor([1.12345]) tensor([1.1235]) >>> torch.arange(10) tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])", - "type": "function", - "file_path": "pytorch\\torch\\_tensor_str.py", - "ast_data": "FunctionDef name:set_printoptions arguments arg:precision arg:threshold arg:edgeitems arg:linewidth arg:profile arg:sci_mode If Compare op:IsNot If Compare op:Eq Assign Assign Assign Assign If Compare op:Eq Assign Assign Assign Assign If Compare op:Eq Assign Assign Assign Assign If Compare op:IsNot Assign If Compare op:IsNot Assign If Compare op:IsNot Assign If Compare op:IsNot Assign Assign" - }, - { - "library": "pytorch", - "name": "load_config", - "source_code": "def load_config(self, maybe_pickled_config: Union[bytes, dict[str, Any]]) -> None: if not isinstance(maybe_pickled_config, dict): config = pickle.loads(maybe_pickled_config) else: config = maybe_pickled_config for k, v in config.items(): if k in self._config: setattr(self, k, v) else: from torch._dynamo.utils import warn_once warn_once(f'key {k} with value {v} is not understood by this config')", - "docstring": "Restore from a prior call to save_config() or shallow_copy_dict()", - "type": "method", - "file_path": "pytorch\\torch\\utils\\_config_module.py", - "ast_data": "FunctionDef name:load_config arguments arg:self arg:maybe_pickled_config type:Union[bytes, dict[str, Any]] If Assign Call call:loads Assign For Call call:items If Compare op:In" - }, - { - "library": "mongo", - "name": "seek", - "source_code": "def seek(self, pos: int, whence: int = _SEEK_SET) -> int: if whence = = _SEEK_SET: new_pos = pos elif whence = = _SEEK_CUR: new_pos = self._position + pos elif whence = = _SEEK_END: new_pos = int(self.length) + pos else: raise OSError(22, 'Invalid value for `whence`') if new_pos < 0: raise OSError(22, 'Invalid value for `pos` - must be positive') if new_pos = = self._position: return new_pos self._position = new_pos self._buffer = EMPTY self._buffer_pos = 0 if self._chunk_iter: self._chunk_iter.close() self._chunk_iter = None return new_pos", - "docstring": "Set the current position of this file. :param pos: the position (or offset if using relative positioning) to seek to :param whence: where to seek from. 
:attr: (`os.SEEK_CURos.SEEK_ENDio.IOBase.seek`.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:seek arguments arg:self arg:pos type:int arg:whence type:int If Compare op:Eq Assign If Compare op:Eq Assign If Compare op:Eq Assign Raise raises:OSError(22, 'Invalid value for `whence`') If Compare op:Lt Raise raises:OSError(22, 'Invalid value for `pos` - must be positive') If Compare op:Eq Return return:yes Assign Assign Assign If Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "extract_shape_from_varargs", - "source_code": "def extract_shape_from_varargs(shape: Union[ShapeType, tuple[ShapeType]], validate = True) -> tuple[int, ...]: if len(shape) = = 1 and isinstance(shape[0], Sequence): shape = shape[0] if validate: validate_shape(shape) return shape", - "docstring": "Returns a shape from varargs. In PyTorch, operations that accept shapes often accept them as varargs, like foo(*shape). However a user can pass the shape as a sequence of integers, like this: foo(1, 2, 3) or as a sequence of integers foo((1, 2, 3)) In the first case shape will be a tuple of integers, and in the second case it's a tuple containing a tuple of integers. This validates those inputs and canonicalizes them to a tuple of integers.", - "type": "function", - "file_path": "pytorch\\torch\\_prims_common\\__init__.py", - "ast_data": "FunctionDef name:extract_shape_from_varargs arguments arg:shape type:Union[ShapeType, tuple[ShapeType]] arg:validate If BoolOp Compare op:Eq Call call:isinstance Assign If Return return:yes" - }, - { - "library": "pytorch", - "name": "set_module_name_object_type_order", - "source_code": "def set_module_name_object_type_order(self, module_name: str, object_type: Callable, index: int, qconfig_list: list[QConfigAny]) -> QConfigMultiMapping: self._insert_qconfig_list('module_name_object_type_order_qconfigs', [module_name, object_type, index], qconfig_list) return self", - "docstring": "Set module_name QConfigs see :func: for more info", - "type": "method", - "file_path": "pytorch\\torch\\ao\\ns\\fx\\qconfig_multi_mapping.py", - "ast_data": "FunctionDef name:set_module_name_object_type_order arguments arg:self arg:module_name type:str arg:object_type type:Callable arg:index type:int arg:qconfig_list type:list[QConfigAny] Return return:yes" - }, - { - "library": "matplotlib", - "name": "tick_params", - "source_code": "def tick_params(self, axis = 'both', **kwargs): _api.check_in_list(['x', 'y', 'z', 'both'], axis = axis) if axis in ['x', 'y', 'both']: super().tick_params(axis, **kwargs) if axis in ['z', 'both']: zkw = dict(kwargs) zkw.pop('top', None) zkw.pop('bottom', None) zkw.pop('labeltop', None) zkw.pop('labelbottom', None) self.zaxis.set_tick_params(**zkw)", - "docstring": "Convenience method for changing the appearance of ticks and tick labels. See for full documentation. Because this function applies to 3D Axes, *axis* can also be set to 'z', and setting *axis* to 'both' autoscales all three axes. Also, because of how Axes3D objects are drawn very differently from regular 2D Axes, some of these settings may have ambiguous meaning. For simplicity, the 'z' axis will accept settings as if it was like the 'y' axis. .. 
note:: Axes3D currently ignores some of these settings.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", - "ast_data": "FunctionDef name:tick_params arguments arg:self arg:axis kwarg:kwargs If Compare op:In If Compare op:In Assign Call call:dict" - }, - { - "library": "scipy", - "name": "EggHolder", - "source_code": "class EggHolder(Benchmark): change_dimensionality = True def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-512.1] * self.N, [512.0] * self.N)) self.global_optimum = [[512.0, 404.2319]] self.fglob = -959.640662711 def fun(self, x, *args): self.nfev + = 1 vec = -(x[1:] + 47) * sin(sqrt(abs(x[1:] + x[: -1] / 2.0 + 47))) - x[: -1] * sin(sqrt(abs(x[: -1] - (x[1:] + 47)))) return sum(vec)", - "docstring": "Egg Holder [1]_ objective function. This class defines the Egg Holder global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{EggHolder}}=\\sum_{1}^{n - 1}\\left[-\\left(x_{i + 1} + 47 \\right ) \\sin\\sqrt{\\lvert x_{i+1} + x_i/2 + 47 \\rvert} - x_i \\sin\\sqrt{\\lvert x_i - (x_{i + 1} + 47)\\rvert}\\right ] Here, :math: represents the number of dimensions and :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194. TODO: Jamil is missing a minus sign on the fglob value", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py", - "ast_data": "ClassDef name:EggHolder Assign FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "append_step", - "source_code": "def append_step(self, step: OutputAdaptStep) -> None: self._steps.append(step)", - "docstring": "Appends a step to the output format steps. Args: step: The step to append.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\io_adapter.py", - "ast_data": "FunctionDef name:append_step arguments arg:self arg:step type:OutputAdaptStep" - }, - { - "library": "pytorch", - "name": "synchronize", - "source_code": "def synchronize(device: Optional[_device_t] = None) -> None: _lazy_init() with torch.cuda.device(device): return torch._C._cuda_synchronize()", - "docstring": "Wait for all kernels in all streams on a CUDA device to complete. Args: device (torch.device or int, optional): device for which to synchronize. It uses the current device, given by :func:, if :attr: is `` (default).", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\__init__.py", - "ast_data": "FunctionDef name:synchronize arguments arg:device type:Optional[_device_t] With Return return:yes" - }, - { - "library": "pandas", - "name": "unique", - "source_code": "def unique(self) -> Self: pa_type = self._pa_array.type if pa_version_under11p0 and pa.types.is_duration(pa_type): data = self._pa_array.cast(pa.int64()) else: data = self._pa_array pa_result = pc.unique(data) if pa_version_under11p0 and pa.types.is_duration(pa_type): pa_result = pa_result.cast(pa_type) return type(self)(pa_result)", - "docstring": "Compute the ArrowExtensionArray of unique values. 
Returns ------- ArrowExtensionArray", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\arrow\\array.py", - "ast_data": "FunctionDef name:unique arguments arg:self Assign If BoolOp Call call:is_duration Assign Call call:cast Assign Assign Call call:unique If BoolOp Call call:is_duration Assign Call call:cast Return return:yes" - }, - { - "library": "mongo", - "name": "options", - "source_code": "async def options(self, session: Optional[AsyncClientSession] = None, comment: Optional[Any] = None) -> MutableMapping[str, Any]: dbo = self._database.client.get_database(self._database.name, self.codec_options, self.read_preference, self.write_concern, self.read_concern) cursor = await dbo.list_collections(session = session, filter = {'name': self._name}, comment = comment) result = None async for doc in cursor: result = doc break if not result: return {} options = result.get('options', {}) assert options is not None if 'create' in options: del options['create'] return options", - "docstring": "Get the options set on this collection. Returns a dictionary of options and their values - see :meth: for more information on the possible options. Returns an empty dictionary if the collection has not been created yet. :param session: a :class:. :param comment: A user-provided comment to attach to this command. .. versionchanged:: 3.6 Added `` parameter.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\collection.py", - "ast_data": "AsyncFunctionDef name:options arguments arg:self arg:session type:Optional[AsyncClientSession] arg:comment type:Optional[Any] Assign Call call:get_database Assign Assign If Return return:yes Assign Call call:get If Compare op:In Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_one_half", - "source_code": "def set_one_half(self, one_half): self._one_half = one_half", - "docstring": "Set the way one half is displayed. one_half : str The string used to represent 1/2.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:set_one_half arguments arg:self arg:one_half Assign" - }, - { - "library": "tensorflow", - "name": "load_graph", - "source_code": "def load_graph(self, returns, meta_graph_def): saver, _ = tf_saver._import_meta_graph_with_return_elements(meta_graph_def) returns[0] = saver", - "docstring": "Called from wrap_function to import .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\load_v1_in_v2.py", - "ast_data": "FunctionDef name:load_graph arguments arg:self arg:returns arg:meta_graph_def Assign Call call:_import_meta_graph_with_return_elements Assign" - }, - { - "library": "matplotlib", - "name": "figaspect", - "source_code": "def figaspect(arg): isarray = hasattr(arg, 'shape') and (not np.isscalar(arg)) figsize_min = np.array((4.0, 2.0)) figsize_max = np.array((16.0, 16.0)) if isarray: nr, nc = arg.shape[: 2] arr_ratio = nr / nc else: arr_ratio = arg fig_height = mpl.rcParams['figure.figsize'][1] newsize = np.array((fig_height / arr_ratio, fig_height)) newsize / = min(1.0, *newsize / figsize_min) newsize / = max(1.0, *newsize / figsize_max) newsize = np.clip(newsize, figsize_min, figsize_max) return newsize", - "docstring": "Calculate the width and height for a figure with a specified aspect ratio. While the height is taken from :rc:, the width is adjusted to match the desired aspect ratio. Additionally, it is ensured that the width is in the range [4., 16.] and the height is in the range [2., 16.]. 
If necessary, the default height is adjusted to ensure this. Parameters ---------- arg : float or 2D array If a float, this defines the aspect ratio (i.e. the ratio height / width). In case of an array the aspect ratio is number of rows / number of columns, so that the array could be fitted in the figure undistorted. Returns ------- size : (2,) array The width and height of the figure in inches. Notes ----- If you want to create an Axes within the figure, that still preserves the aspect ratio, be sure to create it with equal width and height. See examples below. Thanks to Fernando Perez for this function. Examples -------- Make a figure twice as tall as it is wide:: w, h = figaspect(2.) fig = Figure(figsize=(w, h)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) ax.imshow(A, **kwargs) Make a figure with the proper aspect for an array:: A = rand(5, 3) w, h = figaspect(A) fig = Figure(figsize=(w, h)) ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) ax.imshow(A, **kwargs)", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:figaspect arguments arg:arg Assign BoolOp Call call:hasattr Assign Call call:array Assign Call call:array If Assign Assign Assign Assign Assign Call call:array Assign Call call:clip Return return:yes" - }, - { - "library": "matplotlib", - "name": "PlaceHolderLayoutEngine", - "source_code": "class PlaceHolderLayoutEngine(LayoutEngine): def __init__(self, adjust_compatible, colorbar_gridspec, **kwargs): self._adjust_compatible = adjust_compatible self._colorbar_gridspec = colorbar_gridspec super().__init__(**kwargs) def execute(self, fig): return", - "docstring": "This layout engine does not adjust the figure layout at all. The purpose of this is to act as a placeholder when the user removes a layout engine to ensure an incompatible cannot be set later. Parameters ---------- adjust_compatible, colorbar_gridspec : bool Allow the PlaceHolderLayoutEngine to mirror the behavior of whatever layout engine it is replacing.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", - "ast_data": "ClassDef name:PlaceHolderLayoutEngine FunctionDef name:__init__ arguments arg:self arg:adjust_compatible arg:colorbar_gridspec kwarg:kwargs Assign Assign FunctionDef name:execute arguments arg:self arg:fig Return return:no" - }, - { - "library": "django", - "name": "__init__", - "source_code": "def __init__(self, default_settings): self.__dict__['_deleted'] = set() self.default_settings = default_settings", - "docstring": "Requests for configuration variables not in this class are satisfied from the module specified in default_settings (if possible).", - "type": "method", - "file_path": "django\\django\\conf\\__init__.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:default_settings Assign Call call:set Assign" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "@abc.abstractmethod def __call__(self): pass", - "docstring": "Returns the current loss scale as a scalar tensor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\experimental\\loss_scale.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self" - }, - { - "library": "pandas", - "name": "nbytes", - "source_code": "@property def nbytes(self) -> int: return self._values.nbytes", - "docstring": "Return the number of bytes in the underlying data. See Also -------- Series.ndim : Number of dimensions of the underlying data. 
Series.size : Return the number of elements in the underlying data. Examples -------- For Series: >>> s = pd.Series([\"Ant\", \"Bear\", \"Cow\"]) >>> s 0 Ant 1 Bear 2 Cow dtype: object >>> s.nbytes 24 For Index: >>> idx = pd.Index([1, 2, 3]) >>> idx Index([1, 2, 3], dtype='int64') >>> idx.nbytes 24", - "type": "method", - "file_path": "pandas\\pandas\\core\\base.py", - "ast_data": "FunctionDef name:nbytes arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_position", - "source_code": "def get_position(self, original = False): if original: return self._originalPosition.frozen() else: locator = self.get_axes_locator() if not locator: self.apply_aspect() return self._position.frozen()", - "docstring": "Return the position of the Axes within the figure as a . Parameters ---------- original : bool If `.set_position.Bbox`", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:get_position arguments arg:self arg:original If Return return:yes Assign Call call:get_axes_locator If Return return:yes" - }, - { - "library": "pytorch", - "name": "get_op_profiles", - "source_code": "def get_op_profiles(gm: torch.fx.GraphModule, ops_to_guard: set[str]) -> dict[str, set[OpProfile]]: def _get_op_profile(node: torch.fx.Node) -> OpProfile: args_profile = tuple([TensorMetadata.maybe_from_tensor(arg.meta.get('val')) if isinstance(arg, torch.fx.Node) else None for arg in (*node.args, *node.kwargs.values())]) out_profile = None meta = node.meta.get('val') assert meta is not None if isinstance(meta, torch.Tensor): out_profile = TensorMetadata.maybe_from_tensor(meta) elif isinstance(meta, (list, tuple)): out_profile = tuple([TensorMetadata.maybe_from_tensor(m) for m in meta]) assert out_profile is not None return OpProfile(args_profile, out_profile) op_profiles: dict[str, set[OpProfile]] = defaultdict(set) for node in gm.graph.nodes: if node.op = = 'call_function' and str(node.target) in ops_to_guard: op_profiles[str(node.target)].add(_get_op_profile(node)) return op_profiles", - "docstring": "This is used by draft_export to get a list of custom operator profiles so that we can generate fake kernels.", - "type": "function", - "file_path": "pytorch\\torch\\_export\\passes\\insert_custom_op_guards.py", - "ast_data": "FunctionDef name:get_op_profiles arguments arg:gm type:torch.fx.GraphModule arg:ops_to_guard type:set[str] FunctionDef name:_get_op_profile arguments arg:node type:torch.fx.Node Assign Call call:tuple Assign Assign Call call:get If Call call:isinstance Assign Call call:maybe_from_tensor If Call call:isinstance Assign Call call:tuple Return return:yes For If BoolOp Compare op:Eq Compare op:In Return return:yes" - }, - { - "library": "django", - "name": "time_extract_sql", - "source_code": "def time_extract_sql(self, lookup_type, sql, params): return self.date_extract_sql(lookup_type, sql, params)", - "docstring": "Given a lookup_type of 'hour', 'minute', or 'second', return the SQL that extracts a value from the given time field field_name.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:time_extract_sql arguments arg:self arg:lookup_type arg:sql arg:params Return return:yes" - }, - { - "library": "pytorch", - "name": "create_default_global_load_plan", - "source_code": "def create_default_global_load_plan(all_plans: list[LoadPlan]) -> list[LoadPlan]: return all_plans", - "docstring": "Create global load plan used by DefaultLoadPlanner. 
The default load behavior involved no global coordination and this function currently doesn't change the local plans.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\default_planner.py", - "ast_data": "FunctionDef name:create_default_global_load_plan arguments arg:all_plans type:list[LoadPlan] Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_adjustable", - "source_code": "def set_adjustable(self, adjustable, share = False): _api.check_in_list(['box', 'datalim'], adjustable = adjustable) if share: axs = {sibling for name in self._axis_names for sibling in self._shared_axes[name].get_siblings(self)} else: axs = [self] if adjustable = = 'datalim' and any((getattr(ax.get_data_ratio, '__func__', None) ! = _AxesBase.get_data_ratio for ax in axs)): raise ValueError(\"Cannot set Axes adjustable to 'datalim' for Axes which override 'get_data_ratio'\") for ax in axs: ax._adjustable = adjustable self.stale = True", - "docstring": "Set how the Axes adjusts to achieve the required aspect ratio. Parameters ---------- adjustable : {'box', 'datalim'} If 'box', change the physical dimensions of the Axes. If 'datalim', change the ``, apply the settings to all shared Axes. See Also -------- matplotlib.axes.Axes.set_aspect For a description of aspect handling. Notes ----- Shared Axes (of which twinned Axes are a special case) impose restrictions on how aspect ratios can be imposed. For twinned Axes, use 'datalim'. For Axes that share both x and y, use 'box'. Otherwise, either 'datalim' or 'box' may be used. These limitations are partly a requirement to avoid over-specification, and partly a result of the particular implementation we are currently using, in which the adjustments for aspect ratios are done sequentially and independently on each Axes as it is drawn.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:set_adjustable arguments arg:self arg:adjustable arg:share If Assign Assign If BoolOp Compare op:Eq Call call:any Raise raises:ValueError(\"Cannot set Axes adjustable to 'datalim' for Axes which override 'get_data_ratio'\") For Assign Assign" - }, - { - "library": "scipy", - "name": "Matyas", - "source_code": "class Matyas(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return 0.26 * (x[0] ** 2 + x[1] ** 2) - 0.48 * x[0] * x[1]", - "docstring": "Matyas objective function. This class defines the Matyas [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Matyas}}(x) = 0.26(x_1^2 + x_2^2) - 0.48 x_1 x_2 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. 
Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py", - "ast_data": "ClassDef name:Matyas FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "django", - "name": "remove_replaced_nodes", - "source_code": "def remove_replaced_nodes(self, replacement, replaced): replaced = set(replaced) try: replacement_node = self.node_map[replacement] except KeyError as err: raise NodeNotFoundError('Unable to find replacement node %r. It was either never added to the migration graph, or has been removed.' % (replacement,), replacement) from err for replaced_key in replaced: self.nodes.pop(replaced_key, None) replaced_node = self.node_map.pop(replaced_key, None) if replaced_node: for child in replaced_node.children: child.parents.remove(replaced_node) if child.key not in replaced: replacement_node.add_child(child) child.add_parent(replacement_node) for parent in replaced_node.parents: parent.children.remove(replaced_node) if parent.key not in replaced: replacement_node.add_parent(parent) parent.add_child(replacement_node)", - "docstring": "Remove each of the nodes (when they exist). Any dependencies that were referencing them are changed to reference the node instead.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\graph.py", - "ast_data": "FunctionDef name:remove_replaced_nodes arguments arg:self arg:replacement arg:replaced Assign Call call:set Try Assign ExceptHandler Raise raises:NodeNotFoundError('Unable to find replacement node %r. It was either never added to the migration graph, or has been removed.' 
% (replacement,), replacement) For Assign Call call:pop If For If Compare op:NotIn For If Compare op:NotIn" }, { "library": "tensorflow", "name": "get_default_values", "source_code": "@classmethod def get_default_values(cls, obj: Callable[..., Any], *, follow_wrapped: bool = True) -> Dict[str, Any]: signature = super().from_callable(obj, follow_wrapped = follow_wrapped) default_values = {} for p in signature.parameters.values(): if p.default is not p.empty: default_values[p.name] = p.default return default_values", "docstring": "Inspects and returns a dictionary of default values.", "type": "method", "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", "ast_data": "FunctionDef name:get_default_values arguments arg:cls arg:obj type:Callable[..., Any] Assign Call call:from_callable Assign For Call call:values If Compare op:IsNot Assign Return return:yes" }, { "library": "django", "name": "max_name_length", "source_code": "def max_name_length(self): return None", "docstring": "Return the maximum length of table and column names, or None if there is no limit.", "type": "method", "file_path": "django\\django\\db\\backends\\base\\operations.py", "ast_data": "FunctionDef name:max_name_length arguments arg:self Return return:yes" }, { "library": "pytorch", "name": "first_call_function_nn_module_stack", "source_code": "def first_call_function_nn_module_stack(graph: torch.fx.Graph) -> Optional[dict]: for node in graph.nodes: if node.op == 'call_function' and 'nn_module_stack' in node.meta: return node.meta['nn_module_stack'] return None", "docstring": "Returns the nn_module_stack of the first call_function node.", "type": "function", "file_path": "pytorch\\torch\\fx\\_utils.py", "ast_data": "FunctionDef name:first_call_function_nn_module_stack arguments arg:graph type:torch.fx.Graph For If BoolOp Compare op:Eq Compare op:In Return return:yes Return return:yes" }, { "library": "pytorch", "name": "check_tensor", "source_code": "def check_tensor(shards_metadata, tensor_dims) -> None: tensor_rank = len(tensor_dims) shards_rank = len(shards_metadata[0].shard_offsets) if tensor_rank != shards_rank: raise ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}') total_shard_volume = 0 for shard in shards_metadata: shard_volume = 1 for i, shard_length in enumerate(shard.shard_sizes): shard_volume *= shard_length if shard.shard_offsets[i] + shard.shard_sizes[i] > tensor_dims[i]: raise ValueError(f'Shard offset {shard.shard_offsets[i]} and length {shard.shard_sizes[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}') total_shard_volume += shard_volume tensor_volume = 1 for size in tensor_dims: tensor_volume *= size if total_shard_volume != tensor_volume: raise ValueError(f'Total volume of shards: {total_shard_volume} does not match tensor volume: {tensor_volume}, in other words all the individual shards do not cover the entire tensor')", "docstring": "Checks if the shards_metadata is compatible with the provided tensor dims. Args: shards_metadata(List[ShardMetadata]): List of :class: objects representing each shard of the tensor.
tensor_dims(Sequence of int): Dimensions of tensor to verify Raises: `` if not compatible.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py", - "ast_data": "FunctionDef name:check_tensor arguments arg:shards_metadata arg:tensor_dims Assign Call call:len Assign Call call:len If Compare op:NotEq Raise raises:ValueError(f'Rank of tensor is {tensor_rank}, but shards rank is {shards_rank}') Assign For Assign For Call call:enumerate If Compare op:Gt Raise raises:ValueError(f'Shard offset {shard.shard_offsets[i]} and length {shard.shard_sizes[i]} exceeds tensor dim: {tensor_dims[i]} for shard {shard}') Assign For If Compare op:NotEq Raise raises:ValueError(f'Total volume of shards: {total_shard_volume} does not match tensor volume: {tensor_volume}, in other words all the individual shards do not cover the entire tensor')" - }, - { - "library": "pytorch", - "name": "propagate_qconfig_", - "source_code": "def propagate_qconfig_(module, qconfig_dict = None, prepare_custom_config_dict = None): if qconfig_dict is None: qconfig_dict = {} if prepare_custom_config_dict is None: prepare_custom_config_dict = {} _propagate_qconfig_helper(module, qconfig_dict, prepare_custom_config_dict = prepare_custom_config_dict)", - "docstring": "Propagate qconfig through the module hierarchy and assign attribute on each leaf module Args: module: input module qconfig_dict: dictionary that maps from name or type of submodule to quantization configuration, qconfig applies to all submodules of a given module unless qconfig for the submodules are specified (when the submodule already has qconfig attribute) prepare_custom_config_dict: dictionary for custom handling of modules see docs for :func: Return: None, module is modified inplace with qconfig attached", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py", - "ast_data": "FunctionDef name:propagate_qconfig_ arguments arg:module arg:qconfig_dict arg:prepare_custom_config_dict If Compare op:Is Assign If Compare op:Is Assign" - }, - { - "library": "matplotlib", - "name": "resolve_gui_or_backend", - "source_code": "def resolve_gui_or_backend(self, gui_or_backend): if not gui_or_backend.startswith('module: //'): gui_or_backend = gui_or_backend.lower() backend = self.backend_for_gui_framework(gui_or_backend) if backend is not None: return (backend, gui_or_backend if gui_or_backend ! = 'headless' else None) try: return self.resolve_backend(gui_or_backend) except Exception: raise RuntimeError(f\"'{gui_or_backend}' is not a recognised GUI loop or backend name\")", - "docstring": "Return the backend and GUI framework for the specified string that may be either a GUI framework or a backend name, tested in that order. This is for use with the IPython %matplotlib magic command which may be a GUI framework such as `` format. Parameters ---------- gui_or_backend : str or None Name of GUI framework or backend, or None to use the default backend. Returns ------- backend : str The backend name. 
framework : str or None The GUI framework, which will be None for a backend that is non-interactive.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\registry.py", - "ast_data": "FunctionDef name:resolve_gui_or_backend arguments arg:self arg:gui_or_backend If Assign Call call:lower Assign Call call:backend_for_gui_framework If Compare op:IsNot Return return:yes Try Return return:yes ExceptHandler Raise raises:RuntimeError(f\"'{gui_or_backend}' is not a recognised GUI loop or backend name\")" - }, - { - "library": "kornia", - "name": "__call__", - "source_code": "def __call__(self, *inputs: Any, input_names_to_handle: Optional[list[Any]] = None, output_type: str = 'tensor', **kwargs: Any) -> Any: if not self._disable_features: decorated_forward = self.convert_input_output(input_names_to_handle = input_names_to_handle, output_type = output_type)(super().__call__) _output_image = decorated_forward(*inputs, **kwargs) if output_type = = 'tensor': self._output_image = self._detach_tensor_to_cpu(_output_image) else: self._output_image = _output_image else: _output_image = super().__call__(*inputs, **kwargs) return _output_image", - "docstring": "Overwrite the __call__ function to handle various inputs. Args: inputs: Inputs to operate on. input_names_to_handle: List of input names to convert, if None, handle all inputs. output_type: Desired output type ('tensor', 'numpy', or 'pil'). kwargs: Additional arguments. Returns: Callable: Decorated function with converted input and output types.", - "type": "method", - "file_path": "kornia\\kornia\\core\\module.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self vararg:inputs kwarg:kwargs If Assign Call Assign Call call:decorated_forward If Compare op:Eq Assign Call call:_detach_tensor_to_cpu Assign Assign Call call:__call__ Return return:yes" - }, - { - "library": "scikit-learn", - "name": "init", - "source_code": "def init(self, est, begin_at_stage = 0): header_fields = ['Iter', 'Train Loss'] verbose_fmt = ['{iter: >10d}', '{train_score: >16.4f}'] if est.subsample < 1: header_fields.append('OOB Improve') verbose_fmt.append('{oob_impr: >16.4f}') header_fields.append('Remaining Time') verbose_fmt.append('{remaining_time: >16s}') print(('%10s ' + '%16s ' * (len(header_fields) - 1)) % tuple(header_fields)) self.verbose_fmt = ' '.join(verbose_fmt) self.verbose_mod = 1 self.start_time = time() self.begin_at_stage = begin_at_stage", - "docstring": "Initialize reporter Parameters ---------- est : Estimator The estimator begin_at_stage : int, default=0 stage at which to begin reporting", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_gb.py", - "ast_data": "FunctionDef name:init arguments arg:self arg:est arg:begin_at_stage Assign Assign If Compare op:Lt Assign Call call:join Assign Assign Call call:time Assign" - }, - { - "library": "tensorflow", - "name": "get_op_sharding", - "source_code": "def get_op_sharding(op): try: return op.get_attr('_XlaSharding') except ValueError: return None except AttributeError: return None", - "docstring": "Returns sharding attribute of an op. Args: op: a TensorFlow op. 
Returns: The attribute representing XLA sharding on this op.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", - "ast_data": "FunctionDef name:get_op_sharding arguments arg:op Try Return return:yes ExceptHandler Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "scipy", - "name": "stopping_criteria", - "source_code": "def stopping_criteria(self): if self.maxiter is not None: self.finite_iterations() if self.iters is not None: self.finite_iterations() if self.maxfev is not None: self.finite_fev() if self.maxev is not None: self.finite_ev() if self.maxtime is not None: self.finite_time() if self.f_min_true is not None: self.finite_precision() if self.minhgrd is not None: self.finite_homology_growth() return self.stop_global", - "docstring": "Various stopping criteria ran every iteration Returns ------- stop : bool", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_shgo.py", - "ast_data": "FunctionDef name:stopping_criteria arguments arg:self If Compare op:IsNot If Compare op:IsNot If Compare op:IsNot If Compare op:IsNot If Compare op:IsNot If Compare op:IsNot If Compare op:IsNot Return return:yes" - }, - { - "library": "numpy", - "name": "__setstate__", - "source_code": "def __setstate__(self, state): ver, shp, typ, isf, raw, msk, flv = state np.ndarray.__setstate__(self, (shp, typ, isf, raw)) mdtype = np.dtype([(k, np.bool) for k, _ in self.dtype.descr]) self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk)) self.fill_value = flv", - "docstring": "Restore the internal state of the masked array. This is for pickling. `` output, and is a 5-tuple: - class name - a tuple giving the shape of the data - a typecode for the data - a binary string for the data - a binary string for the mask.", - "type": "method", - "file_path": "numpy\\numpy\\ma\\mrecords.py", - "ast_data": "FunctionDef name:__setstate__ arguments arg:self arg:state Assign Assign Call call:dtype Assign" - }, - { - "library": "matplotlib", - "name": "get_ticks_direction", - "source_code": "def get_ticks_direction(self, minor = False): if minor: return np.array([tick._tickdir for tick in self.get_minor_ticks()]) else: return np.array([tick._tickdir for tick in self.get_major_ticks()])", - "docstring": "Return an array of this Axis' tick directions. Parameters ---------- minor : bool, default: False True to return the minor tick directions, False to return the major tick directions. 
Returns ------- array of tick directions", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axis.py", - "ast_data": "FunctionDef name:get_ticks_direction arguments arg:self arg:minor If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "value_type", - "source_code": "@property def value_type(self): return Tensor", - "docstring": "The Python type for values that are compatible with this TypeSpec.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor.py", - "ast_data": "FunctionDef name:value_type arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "xframe_options_sameorigin", - "source_code": "def xframe_options_sameorigin(view_func): if iscoroutinefunction(view_func): async def _view_wrapper(*args, **kwargs): response = await view_func(*args, **kwargs) if response.get('X-Frame-Options') is None: response['X-Frame-Options'] = 'SAMEORIGIN' return response else: def _view_wrapper(*args, **kwargs): response = view_func(*args, **kwargs) if response.get('X-Frame-Options') is None: response['X-Frame-Options'] = 'SAMEORIGIN' return response return wraps(view_func)(_view_wrapper)", - "docstring": "Modify a view function so its response has the X-Frame-Options HTTP header set to 'SAMEORIGIN' as long as the response doesn't already have that header set. Usage: @xframe_options_sameorigin def some_view(request): ...", - "type": "function", - "file_path": "django\\django\\views\\decorators\\clickjacking.py", - "ast_data": "FunctionDef name:xframe_options_sameorigin arguments arg:view_func If Call call:iscoroutinefunction AsyncFunctionDef name:_view_wrapper arguments vararg:args kwarg:kwargs Assign If Compare op:Is Assign Return return:yes FunctionDef name:_view_wrapper arguments vararg:args kwarg:kwargs Assign Call call:view_func If Compare op:Is Assign Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_xbound", - "source_code": "def set_xbound(self, lower = None, upper = None): if upper is None and np.iterable(lower): lower, upper = lower old_lower, old_upper = self.get_xbound() if lower is None: lower = old_lower if upper is None: upper = old_upper self.set_xlim(sorted((lower, upper), reverse = bool(self.xaxis_inverted())), auto = None)", - "docstring": "Set the lower and upper numerical bounds of the x-axis. This method will honor axis inversion regardless of parameter order. It will not change the autoscaling setting (). Parameters ---------- lower, upper : float or None The lower and upper bounds. If *None*, the respective axis bound is not modified. .. ACCEPTS: (lower: float, upper: float) See Also -------- get_xbound get_xlim, set_xlim invert_xaxis, xaxis_inverted", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:set_xbound arguments arg:self arg:lower arg:upper If BoolOp Compare op:Is Call call:iterable Assign Assign Call call:get_xbound If Compare op:Is Assign If Compare op:Is Assign" - }, - { - "library": "pytorch", - "name": "filter_symbols", - "source_code": "def filter_symbols(symbols: OrderedSet[sympy.Symbol]) -> OrderedSet[sympy.Symbol]: return OrderedSet((s for s in symbols if symbol_is_type(s, (SymT.SIZE, SymT.FLOAT, SymT.UNBACKED_INT, SymT.UNBACKED_FLOAT))))", - "docstring": "Filters a set of symbols that are required for codegen. 
Skip symbols that are always internal to kernels, such as SymT.TMP, SymT.INDEX, and SymT.R0_INDEX.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\scheduler.py", - "ast_data": "FunctionDef name:filter_symbols arguments arg:symbols type:OrderedSet[sympy.Symbol] Return return:yes" - }, - { - "library": "scipy", - "name": "tanm", - "source_code": "@_apply_over_batch(('A', 2)) def tanm(A): A = _asarray_square(A) return _maybe_real(A, solve(cosm(A), sinm(A)))", - "docstring": "Compute the matrix tangent. This routine uses expm to compute the matrix exponentials. Parameters ---------- A : (N, N) array_like Input array. Returns ------- tanm : (N, N) ndarray Matrix tangent of Examples -------- >>> import numpy as np >>> from scipy.linalg import tanm, sinm, cosm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> t = tanm(a) >>> t array([[ -2.00876993, -8.41880636], [ -2.80626879, -10.42757629]]) Verify tanm(a) = sinm(a).dot(inv(cosm(a))) >>> s = sinm(a) >>> c = cosm(a) >>> s.dot(np.linalg.inv(c)) array([[ -2.00876993, -8.41880636], [ -2.80626879, -10.42757629]])", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_matfuncs.py", - "ast_data": "FunctionDef name:tanm arguments arg:A Call call:_apply_over_batch Assign Call call:_asarray_square Return return:yes" - }, - { - "library": "pytorch", - "name": "or_masks", - "source_code": "def or_masks(*mask_mods: _mask_mod_signature) -> _mask_mod_signature: if not all((callable(arg) for arg in mask_mods)): raise RuntimeError(f'All inputs should be callable mask_mods: {mask_mods}') def or_mask(b, h, q_idx, kv_idx): result = b.new_zeros((), dtype = torch.bool) for mask in mask_mods: result = result | mask(b, h, q_idx, kv_idx) return result return or_mask", - "docstring": "Returns a mask_mod that's the union of provided mask_mods", - "type": "function", - "file_path": "pytorch\\torch\\nn\\attention\\flex_attention.py", - "ast_data": "FunctionDef name:or_masks arguments vararg:mask_mods If Raise raises:RuntimeError(f'All inputs should be callable mask_mods: {mask_mods}') FunctionDef name:or_mask arguments arg:b arg:h arg:q_idx arg:kv_idx Assign Call call:new_zeros For Assign Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "referenced_base_fields", - "source_code": "@cached_property def referenced_base_fields(self): from django.db.models.sql import query return {child.split(LOOKUP_SEP, 1)[0] for child in query.get_children_from_q(self)}", - "docstring": "Retrieve all base fields referenced directly or through F expressions excluding any fields referenced through joins.", - "type": "method", - "file_path": "django\\django\\db\\models\\query_utils.py", - "ast_data": "FunctionDef name:referenced_base_fields arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "element_spec", - "source_code": "@property def element_spec(self): raise NotImplementedError('DistributedDataset.element_spec must be implemented in descendants.')", - "docstring": "The type specification of an element of this . 
Example usage: >>> global_batch_size = 16 >>> strategy = tf.distribute.MirroredStrategy([\"GPU:0\", \"GPU:1\"]) >>> dataset = tf.data.Dataset.from_tensors(([1.],[2])).repeat(100).batch(global_batch_size) >>> dist_dataset = strategy.experimental_distribute_dataset(dataset) >>> dist_dataset.element_spec (PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.float32, name=None), TensorSpec(shape=(None, 1), dtype=tf.float32, name=None)), PerReplicaSpec(TensorSpec(shape=(None, 1), dtype=tf.int32, name=None), TensorSpec(shape=(None, 1), dtype=tf.int32, name=None))) Returns: A nested structure of objects matching the structure of an element of this . This returned value is typically a object and specifies the of individual components.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\types\\distribute.py", - "ast_data": "FunctionDef name:element_spec arguments arg:self Raise raises:NotImplementedError('DistributedDataset.element_spec must be implemented in descendants.')" - }, - { - "library": "scipy", - "name": "mean", - "source_code": "def mean(self, row, col): r, c, n = self._process_parameters(row, col) return np.outer(r, c) / n", - "docstring": "Mean of distribution of conditional tables. %(_doc_mean_params)s Returns ------- mean: ndarray Mean of the distribution. Notes ----- %(_doc_row_col_note)s Examples -------- >>> from scipy.stats import random_table >>> row = [1, 5] >>> col = [2, 3, 1] >>> random_table.mean(row, col) array([[0.33333333, 0.5 , 0.16666667], [1.66666667, 2.5 , 0.83333333]]) Alternatively, the object may be called (as a function) to fix the row and column vector sums, returning a \"frozen\" distribution. >>> d = random_table(row, col) >>> d.mean() array([[0.33333333, 0.5 , 0.16666667], [1.66666667, 2.5 , 0.83333333]])", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:mean arguments arg:self arg:row arg:col Assign Call call:_process_parameters Return return:yes" - }, - { - "library": "pytorch", - "name": "save", - "source_code": "def save(self, destination: str | os.PathLike, *, include_initializers: bool = True, keep_initializers_as_inputs: bool = False, external_data: bool | None = None): original_initializers = copy.copy(self.model.graph.initializers) original_inputs = copy.copy(self.model.graph.inputs) if not include_initializers: self.model.graph.initializers.clear() if keep_initializers_as_inputs: self.model.graph.inputs.extend(original_initializers.values()) try: if external_data or _count_initializer_size(self.model.graph) > _LARGE_MODEL_THRESHOLD: onnxscript_apis.save_model_with_external_data(self.model, destination) else: ir.save(self.model, destination) finally: if not include_initializers: self.model.graph.initializers.update(original_initializers) if keep_initializers_as_inputs: self.model.graph.inputs.clear() self.model.graph.inputs.extend(original_inputs)", - "docstring": "Save the ONNX model to the specified destination. 
When `True` is not a file path.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_onnx_program.py", - "ast_data": "FunctionDef name:save arguments arg:self arg:destination type:str | os.PathLike Assign Call call:copy Assign Call call:copy If If Try If BoolOp Compare op:Gt If If" - }, - { - "library": "matplotlib", - "name": "get_draggable", - "source_code": "def get_draggable(self): return self._draggable is not None", - "docstring": "Return `` otherwise.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\legend.py", - "ast_data": "FunctionDef name:get_draggable arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "from_sparse_tensor_slices", - "source_code": "@staticmethod @deprecation.deprecated(None, 'Use `tf.data.Dataset.from_tensor_slices()`.') def from_sparse_tensor_slices(sparse_tensor): from tensorflow.python.data.ops import from_sparse_tensor_slices_op return from_sparse_tensor_slices_op._from_sparse_tensor_slices(sparse_tensor)", - "docstring": "Splits each rank-N in this dataset row-wise. Args: sparse_tensor: A . Returns: Dataset: A of rank-(N-1) sparse tensors.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\dataset_ops.py", - "ast_data": "FunctionDef name:from_sparse_tensor_slices arguments arg:sparse_tensor Call call:deprecated Return return:yes" - }, - { - "library": "scipy", - "name": "derivative", - "source_code": "def derivative(self, nu = 1): if nu < 0: return self.antiderivative(-nu) if nu = = 0: c2 = self.c.copy() else: c2 = self.c[: -nu, :].copy() if c2.shape[0] = = 0: c2 = np.zeros((1,) + c2.shape[1:], dtype = c2.dtype) factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu) c2 * = factor[(slice(None),) + (None,) * (c2.ndim - 1)] return self.construct_fast(c2, self.x, self.extrapolate, self.axis)", - "docstring": "Construct a new piecewise polynomial representing the derivative. Parameters ---------- nu : int, optional Order of derivative to evaluate. Default is 1, i.e., compute the first derivative. If negative, the antiderivative is returned. Returns ------- pp : PPoly Piecewise polynomial of order k2 = k - n representing the derivative of this polynomial. Notes ----- Derivatives are evaluated piecewise for each polynomial segment, even if the polynomial is not differentiable at the breakpoints. 
The polynomial intervals are considered half-open, ``.", - "type": "method", - "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", - "ast_data": "FunctionDef name:derivative arguments arg:self arg:nu If Compare op:Lt Return return:yes If Compare op:Eq Assign Call call:copy Assign Call call:copy If Compare op:Eq Assign Call call:zeros Assign Call call:poch Return return:yes" - }, - { - "library": "django", - "name": "CallbackFilter", - "source_code": "class CallbackFilter(logging.Filter): def __init__(self, callback): self.callback = callback def filter(self, record): if self.callback(record): return 1 return 0", - "docstring": "A logging filter that checks the return value of a given callable (which takes the record-to-be-logged as its only parameter) to decide whether to log a record.", - "type": "class", - "file_path": "django\\django\\utils\\log.py", - "ast_data": "ClassDef name:CallbackFilter FunctionDef name:__init__ arguments arg:self arg:callback Assign FunctionDef name:filter arguments arg:self arg:record If Call call:callback Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "t0", - "source_code": "@property def t0(self): return self._t0", - "docstring": "Absolute timestamp of the first dumped tensor across all devices. Returns: () absolute timestamp of the first dumped tensor, in microseconds.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", - "ast_data": "FunctionDef name:t0 arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "encrypt", - "source_code": "def encrypt(self, value: Any, algorithm: str, key_id: Optional[Union[Binary, uuid.UUID]] = None, key_alt_name: Optional[str] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, range_opts: Optional[RangeOpts] = None) -> Binary: return cast(Binary, self._encrypt_helper(value = value, algorithm = algorithm, key_id = key_id, key_alt_name = key_alt_name, query_type = query_type, contention_factor = contention_factor, range_opts = range_opts, is_expression = False))", - "docstring": "Encrypt a BSON value with a given key and algorithm. Note that exactly one of ` (string): The encryption algorithm to use. See :class: for some valid options. :param key_id: Identifies a data key by `~bson.binary.Binary~bson.binary.UUID_SUBTYPE (str): The query type to execute. See :class: for valid options. 
:param contention_factorAlgorithm.INDEXEDAlgorithm.INDEXEDrangeRangeOpts~bson.binary.Binaryrange_optsuuid.UUIDquery_typecontention_factor` parameters.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\encryption.py", - "ast_data": "FunctionDef name:encrypt arguments arg:self arg:value type:Any arg:algorithm type:str arg:key_id type:Optional[Union[Binary, uuid.UUID]] arg:key_alt_name type:Optional[str] arg:query_type type:Optional[str] arg:contention_factor type:Optional[int] arg:range_opts type:Optional[RangeOpts] Return return:yes" - }, - { - "library": "pytorch", - "name": "kaiming_uniform_", - "source_code": "def kaiming_uniform_(tensor: Tensor, a: float = 0, mode: str = 'fan_in', nonlinearity: str = 'leaky_relu', generator: _Optional[torch.Generator] = None): if torch.overrides.has_torch_function_variadic(tensor): return torch.overrides.handle_torch_function(kaiming_uniform_, (tensor,), tensor = tensor, a = a, mode = mode, nonlinearity = nonlinearity, generator = generator) if 0 in tensor.shape: warnings.warn('Initializing zero-element tensors is a no-op') return tensor fan = _calculate_correct_fan(tensor, mode) gain = calculate_gain(nonlinearity, a) std = gain / math.sqrt(fan) bound = math.sqrt(3.0) * std with torch.no_grad(): return tensor.uniform_(-bound, bound, generator = generator)", - "docstring": "Fill the input with values using a Kaiming uniform distribution. The method is described in - He, K. et al. (2015). The resulting tensor will have values sampled from :math: where .. math:: \\text{bound} = \\text{gain} \\times \\sqrt{\\frac{3}{\\text{fan\\_mode}}} Also known as He initialization. Args: tensor: an n-dimensional a: the negative slope of the rectifier used after this layer (only used with `nn.functional`.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\init.py", - "ast_data": "FunctionDef name:kaiming_uniform_ arguments arg:tensor type:Tensor arg:a type:float arg:mode type:str arg:nonlinearity type:str arg:generator type:_Optional[torch.Generator] If Call call:has_torch_function_variadic Return return:yes If Compare op:In Return return:yes Assign Call call:_calculate_correct_fan Assign Call call:calculate_gain Assign Assign With Return return:yes" - }, - { - "library": "pytorch", - "name": "best_probas_and_indices", - "source_code": "def best_probas_and_indices(class_probas: Any) -> str: probas_indices_sorted = sorted([(proba, index) for index, proba in enumerate(class_probas) if proba > 0], key = lambda x: x[0], reverse = True) probas_indices_sorted_str = ', '.join((f'({value: .3f}, {index})' for value, index in probas_indices_sorted)) return f'[{probas_indices_sorted_str}]'", - "docstring": "Given a list of tuples (proba, idx), this function returns a string in which the tuples are sorted by proba in descending order. 
E.g.: Given class_probas=[(0.3, 0), (0.5, 1), (0.2, 2)] this function returns \"[(0.5, 1), (0.3, 0), (0.2, 2)]\"", - "type": "method", - "file_path": "pytorch\\torchgen\\_autoheuristic\\ah_tree.py", - "ast_data": "FunctionDef name:best_probas_and_indices arguments arg:class_probas type:Any Assign Call call:sorted Assign Call call:join Return return:yes" - }, - { - "library": "numpy", - "name": "as_complex", - "source_code": "def as_complex(real, imag = 0): return Expr(Op.COMPLEX, (as_expr(real), as_expr(imag)))", - "docstring": "Return object as COMPLEX expression (complex literal constant).", - "type": "function", - "file_path": "numpy\\numpy\\f2py\\symbolic.py", - "ast_data": "FunctionDef name:as_complex arguments arg:real arg:imag Return return:yes" - }, - { - "library": "mongo", - "name": "mark_command", - "source_code": "async def mark_command(self, database: str, cmd: bytes) -> bytes: if not self._spawned and (not self.opts._mongocryptd_bypass_spawn): self.spawn() inflated_cmd = _inflate_bson(cmd, DEFAULT_RAW_BSON_OPTIONS) assert self.mongocryptd_client is not None try: res = await self.mongocryptd_client[database].command(inflated_cmd, codec_options = DEFAULT_RAW_BSON_OPTIONS) except ServerSelectionTimeoutError: if self.opts._mongocryptd_bypass_spawn: raise self.spawn() res = await self.mongocryptd_client[database].command(inflated_cmd, codec_options = DEFAULT_RAW_BSON_OPTIONS) return res.raw", - "docstring": "Mark a command for encryption. :param database: The database on which to run this command. :param cmd: The BSON command to run. :return: The marked command response from mongocryptd.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\encryption.py", - "ast_data": "AsyncFunctionDef name:mark_command arguments arg:self arg:database type:str arg:cmd type:bytes If BoolOp Assign Call call:_inflate_bson Try Assign ExceptHandler If Raise Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_input_arrays", - "source_code": "def get_input_arrays(self): if self._has_valid_tensors(): return [_get_tensor_name(tensor) for tensor in self._input_tensors] else: return [name for name, _ in self._input_arrays_with_shape]", - "docstring": "Returns a list of the names of the input tensors. Returns: List of strings.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", - "ast_data": "FunctionDef name:get_input_arrays arguments arg:self If Call call:_has_valid_tensors Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "device_memory_used", - "source_code": "def device_memory_used(device: Optional[Union[Device, int]] = None) -> int: if not torch.version.hip: handle = _get_pynvml_handler() device = _get_nvml_device_index(device) handle = pynvml.nvmlDeviceGetHandleByIndex(device) return pynvml.nvmlDeviceGetMemoryInfo(handle).used else: return _get_amdsmi_device_memory_used(device)", - "docstring": "Return used global (device) memory in bytes as given by or . Args: device (torch.device or int, optional): selected device. 
Returns statistic for the current device, given by :func:, if :attr: is `` (default).", "type": "function", "file_path": "pytorch\\torch\\cuda\\__init__.py", "ast_data": "FunctionDef name:device_memory_used arguments arg:device type:Optional[Union[Device, int]] If Assign Call call:_get_pynvml_handler Assign Call call:_get_nvml_device_index Assign Call call:nvmlDeviceGetHandleByIndex Return return:yes Return return:yes" }, { "library": "pytorch", "name": "reduce_add", "source_code": "def reduce_add(inputs, destination = None): destination = _get_device_index(destination, optional = True) input_size = inputs[0].size() root_index = None for i, inp in enumerate(inputs): assert inp.device.type != 'cpu', 'reduce_add expects all inputs to be on GPUs' if inp.get_device() == destination: root_index = i if inp.size() != input_size: got = 'x'.join((str(x) for x in inp.size())) expected = 'x'.join((str(x) for x in input_size)) raise ValueError(f'input {i} has invalid size: got {got}, but expected {expected}') if root_index is None: raise RuntimeError('reduce_add expects destination to be on the same GPU with one of the tensors') if len(inputs) == 1: return inputs[0] if nccl.is_available(inputs): result = torch.empty_like(inputs[root_index]) nccl.reduce(inputs, output = result, root = root_index) else: destination_device = torch.device(inputs[root_index].device.type, destination) nonroot = [t for i, t in enumerate(inputs) if i != root_index] result = inputs[root_index] + nonroot[0].to(device = destination_device, non_blocking = True) for other in nonroot[1:]: result.add_(other.to(device = destination_device, non_blocking = True)) return result", "docstring": "Sum tensors from multiple GPUs. All inputs should have matching shapes, dtype, and layout. The output tensor will be of the same shape, dtype, and layout. Args: inputs (Iterable[Tensor]): an iterable of tensors to add. destination (int, optional): a device on which the output will be placed (default: current device). Returns: A tensor containing an elementwise sum of all inputs, placed on the :attr: device.", "type": "function", "file_path": "pytorch\\torch\\nn\\parallel\\comm.py", "ast_data": "FunctionDef name:reduce_add arguments arg:inputs arg:destination Assign Call call:_get_device_index Assign Call call:size Assign For Call call:enumerate If Compare op:Eq Assign If Compare op:NotEq Assign Call call:join Assign Call call:join Raise raises:ValueError(f'input {i} has invalid size: got {got}, but expected {expected}') If Compare op:Is Raise raises:RuntimeError('reduce_add expects destination to be on the same GPU with one of the tensors') If Compare op:Eq Return return:yes If Call call:is_available Assign Call call:empty_like Assign Call call:device Assign Assign For Return return:yes" }, { "library": "pygame", "name": "update", "source_code": "def update(self, *args, **kwargs): pass", "docstring": "method to control sprite behavior Sprite.update(*args, **kwargs): The default implementation of this method does nothing; it's just a convenient \"hook\" that you can override. This method is called by Group.update() with whatever arguments you give it.
There is no need to use this method if not using the convenience method by the same name in the Group class.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:update arguments arg:self vararg:args kwarg:kwargs" - }, - { - "library": "matplotlib", - "name": "view_limits", - "source_code": "def view_limits(self, vmin, vmax): return mtransforms.nonsingular(vmin, vmax)", - "docstring": "Select a scale for the range from vmin to vmax. Subclasses should override this method to change locator behaviour.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:view_limits arguments arg:self arg:vmin arg:vmax Return return:yes" - }, - { - "library": "django", - "name": "Trans", - "source_code": "class Trans: def __getattr__(self, real_name): from django.conf import settings if settings.USE_I18N: from django.utils.translation import trans_real as trans from django.utils.translation.reloader import translation_file_changed, watch_for_translation_changes autoreload_started.connect(watch_for_translation_changes, dispatch_uid = 'translation_file_changed') file_changed.connect(translation_file_changed, dispatch_uid = 'translation_file_changed') else: from django.utils.translation import trans_null as trans setattr(self, real_name, getattr(trans, real_name)) return getattr(trans, real_name)", - "docstring": "The purpose of this class is to store the actual translation function upon receiving the first call to that function. After this is done, changes to USE_I18N will have no effect to which function is served upon request. If your tests rely on changing USE_I18N, you can delete all the functions from _trans.__dict__. Note that storing the function with setattr will have a noticeable performance effect, as access to the function goes the normal path, instead of using __getattr__.", - "type": "class", - "file_path": "django\\django\\utils\\translation\\__init__.py", - "ast_data": "ClassDef name:Trans FunctionDef name:__getattr__ arguments arg:self arg:real_name If Return return:yes" - }, - { - "library": "django", - "name": "make_valid", - "source_code": "def make_valid(self): return GEOSGeometry(capi.geos_makevalid(self.ptr), srid = self.srid)", - "docstring": "Attempt to create a valid representation of a given invalid geometry without losing any of the input vertices.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:make_valid arguments arg:self Return return:yes" - }, - { - "library": "authlib", - "name": "validate_request_uri_parameter_supported", - "source_code": "def validate_request_uri_parameter_supported(self): _validate_boolean_value(self, 'request_uri_parameter_supported')", - "docstring": "OPTIONAL. Boolean value specifying whether the OP supports use of the request_uri parameter, with true indicating support. If omitted, the default value is true.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", - "ast_data": "FunctionDef name:validate_request_uri_parameter_supported arguments arg:self" - }, - { - "library": "matplotlib", - "name": "rc_file_defaults", - "source_code": "def rc_file_defaults(): with _api.suppress_matplotlib_deprecation_warning(): from .style.core import STYLE_BLACKLIST rcParams.update({k: rcParamsOrig[k] for k in rcParamsOrig if k not in STYLE_BLACKLIST})", - "docstring": "Restore the from the original rc file loaded by Matplotlib. 
Style-blacklisted (defined in ``) are not updated.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\__init__.py", - "ast_data": "FunctionDef name:rc_file_defaults arguments With" - }, - { - "library": "matplotlib", - "name": "__call__", - "source_code": "def __call__(self, x, pos = None): raise NotImplementedError('Derived must override')", - "docstring": "Return the format for tick value *x* at position pos. `` indicates an unspecified location.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:x arg:pos Raise raises:NotImplementedError('Derived must override')" - }, - { - "library": "tensorflow", - "name": "device_name", - "source_code": "@property def device_name(self): return self._device_name", - "docstring": "Name of the device that the tensor belongs to. Returns: () device name.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", - "ast_data": "FunctionDef name:device_name arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "__init__", - "source_code": "def __init__(self, filter: Mapping[str, Any], collation: Optional[_CollationIn] = None, hint: Optional[_IndexKeyHint] = None, namespace: Optional[str] = None) -> None: super().__init__(filter, collation, hint, namespace)", - "docstring": "Create a DeleteMany instance. For use with :meth:, :meth:, :meth: and :meth:. :param filter: A query that matches the documents to delete. :param collation: An instance of :class:. :param hint: An index to use to support the query predicate specified either by its string name, or in the same format as passed to :meth: or :meth: (e.g. `namespaceMongoClient.bulk_writecollation` option.", - "type": "method", - "file_path": "mongo\\pymongo\\operations.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:filter type:Mapping[str, Any] arg:collation type:Optional[_CollationIn] arg:hint type:Optional[_IndexKeyHint] arg:namespace type:Optional[str]" - }, - { - "library": "flexx", - "name": "add_shared_data", - "source_code": "def add_shared_data(self, name, data): if not isinstance(name, str): raise TypeError('add_shared_data() name must be a str.') if name in self._data: raise ValueError('add_shared_data() got existing name %r.' % name) if not isinstance(data, bytes): raise TypeError('add_shared_data() data must be bytes.') self._data[name] = data return 'flexx/data/shared/%s' % name", - "docstring": "Add data to serve to the client (e.g. images), which is shared between sessions. It is an error to add data with a name that is already registered. See `` to set data per-session and use actions to send data to JsComponent objects directly. Parameters: name (str): the name of the data, e.g. 'icon.png'. data (bytes): the data blob. Returns: str: the (relative) url at which the data can be retrieved.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_assetstore.py", - "ast_data": "FunctionDef name:add_shared_data arguments arg:self arg:name arg:data If Raise raises:TypeError('add_shared_data() name must be a str.') If Compare op:In Raise raises:ValueError('add_shared_data() got existing name %r.' 
% name) If Raise raises:TypeError('add_shared_data() data must be bytes.') Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "make_eager_backend_with_torch_function_modes", - "source_code": "def make_eager_backend_with_torch_function_modes(modes): from contextlib import ExitStack def fn(gm, fake_tensor_inputs, **kwargs): stack = ExitStack() for mode in modes: stack.enter_context(mode) result = gm.forward stack.close() return result return fn", - "docstring": "Used to trace HOPs (cond and while) for eager exectution, the metadata TF mode mutates vars outside of the scope of the HOP, and we can't have graph breaks in the HOP, so we need to externally run this mode and not trace it.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\backends\\debugging.py", - "ast_data": "FunctionDef name:make_eager_backend_with_torch_function_modes arguments arg:modes FunctionDef name:fn arguments arg:gm arg:fake_tensor_inputs kwarg:kwargs Assign Call call:ExitStack For Assign Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "create", - "source_code": "def create(self): raise NotImplementedError('subclasses of SessionBase must provide a create() method')", - "docstring": "Create a new session instance. Guaranteed to create a new object with a unique key and will have saved the result once (with empty data) before the method returns.", - "type": "method", - "file_path": "django\\django\\contrib\\sessions\\backends\\base.py", - "ast_data": "FunctionDef name:create arguments arg:self Raise raises:NotImplementedError('subclasses of SessionBase must provide a create() method')" - }, - { - "library": "django", - "name": "SwappableTuple", - "source_code": "class SwappableTuple(tuple): def __new__(cls, value, setting): self = tuple.__new__(cls, value) self.setting = setting return self", - "docstring": "Subclass of tuple so Django can tell this was originally a swappable dependency when it reads the migration file.", - "type": "class", - "file_path": "django\\django\\db\\migrations\\migration.py", - "ast_data": "ClassDef name:SwappableTuple FunctionDef name:__new__ arguments arg:cls arg:value arg:setting Assign Call call:__new__ Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "on_train_batch_end", - "source_code": "@doc_controls.for_subclass_implementers @generic_utils.default def on_train_batch_end(self, batch, logs = None): self.on_batch_end(batch, logs = logs)", - "docstring": "Called at the end of a training batch in methods. Subclasses should override for any actions to run. Note that if the argument to in is set to , this method will only be called every batches. Args: batch: Integer, index of batch within the current epoch. logs: Dict. Aggregated metric results up until this batch.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "FunctionDef name:on_train_batch_end arguments arg:self arg:batch arg:logs" - }, - { - "library": "matplotlib", - "name": "set_sketch_params", - "source_code": "def set_sketch_params(self, scale = None, length = None, randomness = None): self._sketch = None if scale is None else (scale, length or 128.0, randomness or 16.0)", - "docstring": "Set the sketch parameters. Parameters ---------- scale : float, optional The amplitude of the wiggle perpendicular to the source line, in pixels. If scale is , or not provided, no sketch filter will be provided. length : float, default: 128 The length of the wiggle along the line, in pixels. 
randomness : float, default: 16 The scale factor by which the length is shrunken or expanded.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:set_sketch_params arguments arg:self arg:scale arg:length arg:randomness Assign" - }, - { - "library": "pytorch", - "name": "__new__", - "source_code": "def __new__(cls, *args, **kwargs): orig_cls = cls.__mro__[2] return orig_cls.__new__(orig_cls, *args, **kwargs)", - "docstring": "Override `` to remove the DDP class and directly construct the original class for cases like indexing into a container module.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_composable\\replicate.py", - "ast_data": "FunctionDef name:__new__ arguments arg:cls vararg:args kwarg:kwargs Assign Return return:yes" - }, - { - "library": "scipy", - "name": "backtick_repl", - "source_code": "def backtick_repl(matchobj): if matchobj.group(2) ! = ' ': post = '\\\\ ' + matchobj.group(2) else: post = matchobj.group(2) return '``' + matchobj.group(1) + '``' + post", - "docstring": "repl to add an escaped space following a code block if needed", - "type": "function", - "file_path": "scipy\\tools\\gh_lists.py", - "ast_data": "FunctionDef name:backtick_repl arguments arg:matchobj If Compare op:NotEq Assign Assign Call call:group Return return:yes" - }, - { - "library": "tensorflow", - "name": "metrics", - "source_code": "@property def metrics(self): metrics = [] if self._is_compiled: if self.compiled_loss is not None: metrics + = self.compiled_loss.metrics if self.compiled_metrics is not None: metrics + = self.compiled_metrics.metrics for l in self._flatten_layers(): metrics.extend(l._metrics) return metrics", - "docstring": "Returns the model's metrics added using , APIs. Note: Metrics passed to are available only after a has been trained/evaluated on actual data. Examples: >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> outputs = tf.keras.layers.Dense(2)(inputs) >>> model = tf.keras.models.Model(inputs=inputs, outputs=outputs) >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\"]) >>> [m.name for m in model.metrics] [] >>> x = np.random.random((2, 3)) >>> y = np.random.randint(0, 2, (2, 2)) >>> model.fit(x, y) >>> [m.name for m in model.metrics] ['loss', 'mae'] >>> inputs = tf.keras.layers.Input(shape=(3,)) >>> d = tf.keras.layers.Dense(2, name='out') >>> output_1 = d(inputs) >>> output_2 = d(inputs) >>> model = tf.keras.models.Model( ... inputs=inputs, outputs=[output_1, output_2]) >>> model.add_metric( ... tf.reduce_sum(output_2), name='mean', aggregation='mean') >>> model.compile(optimizer=\"Adam\", loss=\"mse\", metrics=[\"mae\", \"acc\"]) >>> model.fit(x, (y, y)) >>> [m.name for m in model.metrics] ['loss', 'out_loss', 'out_1_loss', 'out_mae', 'out_acc', 'out_1_mae', 'out_1_acc', 'mean']", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training.py", - "ast_data": "FunctionDef name:metrics arguments arg:self Assign If If Compare op:IsNot If Compare op:IsNot For Call call:_flatten_layers Return return:yes" - }, - { - "library": "pandas", - "name": "apply", - "source_code": "@staticmethod @abc.abstractmethod def apply(data: Series | DataFrame | np.ndarray, func: AggFuncType, args: tuple, kwargs: dict[str, Any], decorator: Callable, axis: Axis): pass", - "docstring": "Executor method to run functions by an axis. 
While we can see `` is implemented accordingly.", - "type": "method", - "file_path": "pandas\\pandas\\core\\apply.py", - "ast_data": "FunctionDef name:apply arguments arg:data type:Series | DataFrame | np.ndarray arg:func type:AggFuncType arg:args type:tuple arg:kwargs type:dict[str, Any] arg:decorator type:Callable arg:axis type:Axis" - }, - { - "library": "seaborn", - "name": "convert_units", - "source_code": "def convert_units(self, x): if np.issubdtype(np.asarray(x).dtype, np.number): return x elif self.converter is None: return x return self.converter.convert(x, self.units, self)", - "docstring": "Return a numeric representation of the input data.", - "type": "method", - "file_path": "seaborn\\seaborn\\_core\\scales.py", - "ast_data": "FunctionDef name:convert_units arguments arg:self arg:x If Call call:issubdtype Return return:yes If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_radio_props", - "source_code": "def set_radio_props(self, props): _api.check_isinstance(dict, props = props) if 's' in props: props['sizes'] = np.broadcast_to(props.pop('s'), len(self.labels)) self._buttons.update(props) self._active_colors = self._buttons.get_facecolor() if len(self._active_colors) = = 1: self._active_colors = np.repeat(self._active_colors, len(self.labels), axis = 0) self._buttons.set_facecolor([activecolor if text.get_text() = = self.value_selected else 'none' for text, activecolor in zip(self.labels, self._active_colors)])", - "docstring": "Set properties of the labels. .. versionadded:: 3.7 Parameters ---------- props : dict Dictionary of properties to be used for the radio buttons.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", - "ast_data": "FunctionDef name:set_radio_props arguments arg:self arg:props If Compare op:In Assign Call call:broadcast_to Assign Call call:get_facecolor If Compare op:Eq Assign Call call:repeat" - }, - { - "library": "scrapy", - "name": "stop", - "source_code": "def stop(self) -> Deferred[Any]: return self._stop()", - "docstring": "Stops simultaneously all the crawling jobs taking place. Returns a deferred that is fired when they all have ended.", - "type": "method", - "file_path": "scrapy\\scrapy\\crawler.py", - "ast_data": "FunctionDef name:stop arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "register_custom_opdefs", - "source_code": "def register_custom_opdefs(custom_opdefs_list): return wrap_converter.wrapped_register_custom_opdefs(custom_opdefs_list)", - "docstring": "Register the given custom opdefs to the TensorFlow global op registry. Args: custom_opdefs_list: String representing the custom ops OpDefs that are included in the GraphDef. 
Returns: True if the registration is successfully completed.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\python\\convert.py", - "ast_data": "FunctionDef name:register_custom_opdefs arguments arg:custom_opdefs_list Return return:yes" - }, - { - "library": "scipy", - "name": "Corana", - "source_code": "class Corana(Benchmark): def __init__(self, dimensions = 4): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N)) self.global_optimum = [[0 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 d = [1.0, 1000.0, 10.0, 100.0] r = 0 for j in range(4): zj = floor(abs(x[j] / 0.2) + 0.49999) * sign(x[j]) * 0.2 if abs(x[j] - zj) < 0.05: r + = 0.15 * (zj - 0.05 * sign(zj)) ** 2 * d[j] else: r + = d[j] * x[j] * x[j] return r", - "docstring": "Corana objective function. This class defines the Corana [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Corana}}(x) = \\begin{cases} \\sum_{i=1}^n 0.15 d_i [z_i - 0.05\\textrm{sgn}(z_i)]^2 & \\textrm{if }|x_i-z_i| < 0.05 \\\\ d_ix_i^2 & \\textrm{otherwise}\\end{cases} Where, in this exercise: .. math:: z_i = 0.2 \\lfloor |x_i/s_i|+0.49999\\rfloor\\textrm{sgn}(x_i), d_i=(1,1000,10,100, ...) with :math: for :math:. *Global optimum*: :math: for :math: for :math: ..[1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_C.py", - "ast_data": "ClassDef name:Corana FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Assign For Call call:range Assign If Compare op:Lt Return return:yes" - }, - { - "library": "pytorch", - "name": "build_metadata", - "source_code": "@abstractmethod def build_metadata(self, tensor_sizes: torch.Size, tensor_properties: sharded_tensor_meta.TensorProperties) -> sharded_tensor_meta.ShardedTensorMetadata: pass", - "docstring": "Given a global tensor size, define how to shard a tensor like this shape across ranks, return ShardedTensorMetadata Args: tensor_sizes (:class:): The tensor shape to shard on, a object that represents the tensor shape to be sharded according to the ShardingSpec. 
tensor_properties(:class:ShardedTensorMetadata` object that encodes the information about the layout of the ShardedTensor and its properties.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\api.py", - "ast_data": "FunctionDef name:build_metadata arguments arg:self arg:tensor_sizes type:torch.Size arg:tensor_properties type:sharded_tensor_meta.TensorProperties" - }, - { - "library": "pytorch", - "name": "replace_all_batch_norm_modules_", - "source_code": "@exposed_in('torch.func') def replace_all_batch_norm_modules_(root: nn.Module) -> nn.Module: batch_norm_without_running_stats(root) for obj in root.modules(): batch_norm_without_running_stats(obj) return root", - "docstring": "In place updates :attr: by setting the `root`", - "type": "function", - "file_path": "pytorch\\torch\\_functorch\\batch_norm_replacement.py", - "ast_data": "FunctionDef name:replace_all_batch_norm_modules_ arguments arg:root type:nn.Module Call call:exposed_in For Call call:modules Return return:yes" - }, - { - "library": "tensorflow", - "name": "on_session_init", - "source_code": "@abc.abstractmethod def on_session_init(self, request): pass", - "docstring": "Callback invoked during construction of the debug-wrapper session. This is a blocking callback. The invocation happens right before the constructor ends. Args: request: () callback request carrying information such as the session being wrapped. Returns: An instance of .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", - "ast_data": "FunctionDef name:on_session_init arguments arg:self arg:request" - }, - { - "library": "pytorch", - "name": "print_n_shadows_summary", - "source_code": "def print_n_shadows_summary(results_comparison) -> None: try: from tabulate import tabulate except ImportError: print('`print_tabular` relies on the library `tabulate`, which could not be found on this machine. Run `pip install tabulate` to install the library.') return results = [] for subgraph_data in results_comparison.values(): mean_all_candidates = [candidate['cmp_mean'] for candidate_name, candidate in subgraph_data['candidates'].items()] data_row = [subgraph_data['ref_node_name'], subgraph_data['ref_node_target_type'], subgraph_data['fqn'], *mean_all_candidates] results.append(data_row) max_candidate_idx_len = -1 for data_row in results: max_candidate_idx_len = max(max_candidate_idx_len, len(data_row[1])) candidate_idx_headers = [str(x) for x in range(max_candidate_idx_len)] headers = ['node_name', 'node_type', 'fqn', *candidate_idx_headers] print(tabulate(results, headers = headers))", - "docstring": "Input: { 'subgraph_0': { 'ref_node_name': 'linear1', 'ref_node_target_type': '...', 'fqn': '...', 'candidates': { '1': { 'qconfig_str': ..., 'comparison_fn_name': ..., 'cmp_raw': [45.0, 55.0], 'cmp_mean': 50.0, }, ..., }, }, } Prints: node_name | node_type | fqn | 0 | 1 | ... linear1 | ... | ... 
| 45.0 | 50.0 | ...", - "type": "function", - "file_path": "pytorch\\torch\\ao\\ns\\fx\\n_shadows_utils.py", - "ast_data": "FunctionDef name:print_n_shadows_summary arguments arg:results_comparison Try ExceptHandler Return return:no Assign For Call call:values Assign Assign Assign For Assign Call call:max Assign Assign" - }, - { - "library": "tensorflow", - "name": "Location", - "source_code": "class Location(collections.namedtuple('Location', ('filename', 'lineno', 'col_offset'))): @property def line_loc(self): return LineLocation(self.filename, self.lineno)", - "docstring": "Encodes code location information. Attributes: filename: Text lineno: int, 1-based col_offset: int line_loc: LineLocation", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py", - "ast_data": "ClassDef name:Location Call call:namedtuple FunctionDef name:line_loc arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "elapsed_time", - "source_code": "def elapsed_time(self, end_event: 'Event') -> float: return torch._C._mps_elapsedTimeOfEvents(self.__eventId, end_event.__eventId)", - "docstring": "Returns the time elapsed in milliseconds after the event was recorded and before the end_event was recorded.", - "type": "method", - "file_path": "pytorch\\torch\\mps\\event.py", - "ast_data": "FunctionDef name:elapsed_time arguments arg:self arg:end_event type:'Event' Return return:yes" - }, - { - "library": "kornia", - "name": "compute_subpixel_step", - "source_code": "def compute_subpixel_step(self) -> Tensor: delta_d = 0.01 xy_m1 = self._compute_projection(self.width / 2, self.height / 2, 1.0 - delta_d) xy_p1 = self._compute_projection(self.width / 2, self.height / 2, 1.0 + delta_d) dx = torch.norm(xy_p1 - xy_m1, 2, dim = -1) / 2.0 dxdd = dx / delta_d return torch.min(0.5 / dxdd)", - "docstring": "Compute the inverse depth step for sub pixel accurate sampling of the depth cost volume, per camera. Szeliski, Richard, and Daniel Scharstein. \"Symmetric sub-pixel stereo matching.\" European Conference on Computer Vision. Springer Berlin Heidelberg, 2002.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\depth.py", - "ast_data": "FunctionDef name:compute_subpixel_step arguments arg:self Assign Assign Call call:_compute_projection Assign Call call:_compute_projection Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "adjust_compatible", - "source_code": "@property def adjust_compatible(self): if self._adjust_compatible is None: raise NotImplementedError return self._adjust_compatible", - "docstring": "Return a boolean if the layout engine is compatible with .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", - "ast_data": "FunctionDef name:adjust_compatible arguments arg:self If Compare op:Is Raise raises:NotImplementedError Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_stats_for_node_def", - "source_code": "def get_stats_for_node_def(graph, node, statistic_type) -> Any: try: stats_func = _stats_registry.lookup(node.op + ', ' + statistic_type) result = stats_func(graph, node) except LookupError: result = OpStats(statistic_type) return result", - "docstring": "Looks up the node's statistics function in the registry and calls it. This function takes a Graph object and a NodeDef from a GraphDef, and if there's an associated statistics method, calls it and returns a result. 
If no function has been registered for the particular node type, it returns an empty statistics object. Args: graph: A Graph object that's been set up with the node's graph. node: A NodeDef describing the operator. statistic_type: A string identifying the statistic we're interested in. Returns: An OpStats object containing information about resource usage.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:get_stats_for_node_def arguments arg:graph arg:node arg:statistic_type Try Assign Call call:lookup Assign Call call:stats_func ExceptHandler Assign Call call:OpStats Return return:yes" - }, - { - "library": "django", - "name": "clean_ipv6_address", - "source_code": "def clean_ipv6_address(ip_str, unpack_ipv4 = False, error_message = _('This is not a valid IPv6 address.'), max_length = MAX_IPV6_ADDRESS_LENGTH): try: addr = _ipv6_address_from_str(ip_str, max_length) except ValueError: raise ValidationError(error_message, code = 'invalid', params = {'protocol': _('IPv6')}) if unpack_ipv4 and addr.ipv4_mapped: return str(addr.ipv4_mapped) elif addr.ipv4_mapped: return ': : ffff: %s' % str(addr.ipv4_mapped) return str(addr)", - "docstring": "Clean an IPv6 address string. Raise ValidationError if the address is invalid. Replace the longest continuous zero-sequence with \"::\", remove leading zeroes, and make sure all hextets are lowercase. Args: ip_str: A valid IPv6 address. unpack_ipv4: if an IPv4-mapped address is found, return the plain IPv4 address (default=False). error_message: An error message used in the ValidationError. Return a compressed IPv6 address or the same value.", - "type": "function", - "file_path": "django\\django\\utils\\ipv6.py", - "ast_data": "FunctionDef name:clean_ipv6_address arguments arg:ip_str arg:unpack_ipv4 arg:error_message arg:max_length Try Assign Call call:_ipv6_address_from_str ExceptHandler Raise raises:ValidationError(error_message, code='invalid', params={'protocol': _('IPv6')}) If BoolOp Return return:yes If Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "libpaths", - "source_code": "def libpaths(paths, bits): if bits not in (32, 64): raise ValueError('Invalid bit size in libpaths: 32 or 64 only') if bits = = 32: return paths out = [] for p in paths: out.extend([p + '64', p]) return out", - "docstring": "Return a list of library paths valid on 32 or 64 bit systems. Inputs: paths : sequence A sequence of strings (typically paths) bits : int An integer, the only valid values are 32 or 64. A ValueError exception is raised otherwise. 
Examples: Consider a list of directories >>> paths = ['/usr/X11R6/lib','/usr/X11/lib','/usr/lib'] For a 32-bit platform, this is already valid: >>> np.distutils.system_info.libpaths(paths,32) ['/usr/X11R6/lib', '/usr/X11/lib', '/usr/lib'] On 64 bits, we prepend the '64' postfix >>> np.distutils.system_info.libpaths(paths,64) ['/usr/X11R6/lib64', '/usr/X11R6/lib', '/usr/X11/lib64', '/usr/X11/lib', '/usr/lib64', '/usr/lib']", - "type": "function", - "file_path": "numpy\\numpy\\distutils\\system_info.py", - "ast_data": "FunctionDef name:libpaths arguments arg:paths arg:bits If Compare op:NotIn Raise raises:ValueError('Invalid bit size in libpaths: 32 or 64 only') If Compare op:Eq Return return:yes Assign For Return return:yes" - }, - { - "library": "numpy", - "name": "rindex", - "source_code": "@set_module('numpy.strings') def rindex(a, sub, start = 0, end = None): end = end if end is not None else MAX return _rindex_ufunc(a, sub, start, end)", - "docstring": "Like , but raises :exc: when the substring is not found. Parameters ---------- a : array-like, with or dtype sub : array-like, with or dtype start, end : array-like, with any integer dtype, optional Returns ------- out : ndarray Output array of ints. See Also -------- rfind, str.rindex Examples -------- >>> a = np.array([\"Computer Science\"]) >>> np.strings.rindex(a, \"Science\", start=0, end=None) array([9])", - "type": "function", - "file_path": "numpy\\numpy\\_core\\strings.py", - "ast_data": "FunctionDef name:rindex arguments arg:a arg:sub arg:start arg:end Call call:set_module Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "__getitem__", - "source_code": "def __getitem__(self, item): if not self._isinit: self._init() if item = = 0: origin_1_as_int = int(self._origin[1] * self.M) if origin_1_as_int > self.M - 1: origin_1_as_int = self.M - 1 one_d_lut = self._lut[:, origin_1_as_int] new_cmap = ListedColormap(one_d_lut, name = f'{self.name}_0') elif item = = 1: origin_0_as_int = int(self._origin[0] * self.N) if origin_0_as_int > self.N - 1: origin_0_as_int = self.N - 1 one_d_lut = self._lut[origin_0_as_int, :] new_cmap = ListedColormap(one_d_lut, name = f'{self.name}_1') else: raise KeyError(f'only 0 or 1 are valid keys for BivarColormap, not {item!r}') new_cmap._rgba_bad = self._rgba_bad if self.shape in ['ignore', 'circleignore']: new_cmap.set_over(self._rgba_outside) new_cmap.set_under(self._rgba_outside) return new_cmap", - "docstring": "Creates and returns a colorbar along the selected axis", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:__getitem__ arguments arg:self arg:item If If Compare op:Eq Assign Call call:int If Compare op:Gt Assign Assign Assign Call call:ListedColormap If Compare op:Eq Assign Call call:int If Compare op:Gt Assign Assign Assign Call call:ListedColormap Raise raises:KeyError(f'only 0 or 1 are valid keys for BivarColormap, not {item!r}') Assign If Compare op:In Return return:yes" - }, - { - "library": "tensorflow", - "name": "MinMaxNorm", - "source_code": "class MinMaxNorm(Constraint): def __init__(self, min_value = 0.0, max_value = 1.0, rate = 1.0, axis = 0): self.min_value = min_value self.max_value = max_value self.rate = rate self.axis = axis @doc_controls.do_not_generate_docs def __call__(self, w): norms = backend.sqrt(math_ops.reduce_sum(math_ops.square(w), axis = self.axis, keepdims = True)) desired = self.rate * backend.clip(norms, self.min_value, self.max_value) + (1 - self.rate) * norms return w * (desired / 
(backend.epsilon() + norms)) @doc_controls.do_not_generate_docs def get_config(self): return {'min_value': self.min_value, 'max_value': self.max_value, 'rate': self.rate, 'axis': self.axis}", - "docstring": "MinMaxNorm weight constraint. Constrains the weights incident to each hidden unit to have the norm between a lower bound and an upper bound. Also available via the shortcut function . Args: min_value: the minimum norm for the incoming weights. max_value: the maximum norm for the incoming weights. rate: rate for enforcing the constraint: weights will be rescaled to yield . Effectively, this means that rate=1.0 stands for strict enforcement of the constraint, while rate<1.0 means that weights will be rescaled at each step to slowly move towards a value inside the desired interval. axis: integer, axis along which to calculate weight norms. For instance, in a layer the weight matrix has shape , set to to constrain each weight vector of length . In a layer with , the weight tensor has shape , set to to constrain the weights of each filter tensor of size .", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\constraints.py", - "ast_data": "ClassDef name:MinMaxNorm FunctionDef name:__init__ arguments arg:self arg:min_value arg:max_value arg:rate arg:axis Assign Assign Assign Assign FunctionDef name:__call__ arguments arg:self arg:w Assign Call call:sqrt Assign Return return:yes FunctionDef name:get_config arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "list_stack", - "source_code": "def list_stack(list_, opts): assert isinstance(opts, ListStackOpts) if isinstance(list_, tensor_array_ops.TensorArray): return _tf_tensorarray_stack(list_) elif tensor_util.is_tf_type(list_): if list_.dtype = = dtypes.variant: return _tf_tensor_list_stack(list_, opts) else: return list_ else: return _py_list_stack(list_, opts)", - "docstring": "The list stack function. This does not have a direct correspondent in Python. The closest idiom to this is tf.append or np.stack. It's different from those in the sense that it accepts a Tensor list, rather than a list of tensors. It can also accept TensorArray. When the target is anything else, the dispatcher will rely on ctx.original_call for fallback. Args: list_: An entity that supports append semantics. opts: A ListStackOpts object. 
Returns: The output of the stack operation, typically a Tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\operators\\data_structures.py", - "ast_data": "FunctionDef name:list_stack arguments arg:list_ arg:opts If Call call:isinstance Return return:yes If Call call:is_tf_type If Compare op:Eq Return return:yes Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "get_text_list", - "source_code": "@keep_lazy_text def get_text_list(list_, last_word = gettext_lazy('or')): if not list_: return '' if len(list_) = = 1: return str(list_[0]) return '%s %s %s' % (_(', ').join((str(i) for i in list_[: -1])), str(last_word), str(list_[-1]))", - "docstring": ">>> get_text_list(['a', 'b', 'c', 'd']) 'a, b, c or d' >>> get_text_list(['a', 'b', 'c'], 'and') 'a, b and c' >>> get_text_list(['a', 'b'], 'and') 'a and b' >>> get_text_list(['a']) 'a' >>> get_text_list([]) ''", - "type": "function", - "file_path": "django\\django\\utils\\text.py", - "ast_data": "FunctionDef name:get_text_list arguments arg:list_ arg:last_word If Return return:yes If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "call_options", - "source_code": "def call_options(self): return ConversionOptions(recursive = self.recursive, user_requested = False, internal_convert_user_code = self.recursive, optional_features = self.optional_features)", - "docstring": "Returns the corresponding options to be used for recursive conversion.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py", - "ast_data": "FunctionDef name:call_options arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "get_values", - "source_code": "def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: raise AbstractMethodError(self)", - "docstring": "return an internal format, currently just the ndarray this is often overridden to handle to_dense like operations", - "type": "method", - "file_path": "pandas\\pandas\\core\\internals\\blocks.py", - "ast_data": "FunctionDef name:get_values arguments arg:self arg:dtype type:DtypeObj | None Raise raises:AbstractMethodError(self)" - }, - { - "library": "pytorch", - "name": "deserialize", - "source_code": "@classmethod def deserialize(cls, json_str: str) -> 'GemmOperation': json_dict = json.loads(json_str) return cls._json_to_gemm_operation(json_dict)", - "docstring": "Deserialize JSON string to a GEMM operation. 
Args: json_str: JSON string of a GEMM operation Returns: GemmOperation: Reconstructed operation", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\serialization.py", - "ast_data": "FunctionDef name:deserialize arguments arg:cls arg:json_str type:str Assign Call call:loads Return return:yes" - }, - { - "library": "pytorch", - "name": "OpSupports", - "source_code": "@compatibility(is_backward_compatible = False) class OpSupports: @classmethod def decline_if_input_dtype(cls, dtype: torch.dtype) -> OperatorSupportBase: def _decline_if_input_dtype(submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool: for arg in node.all_input_nodes: arg_dtype = _get_arg_dtype(arg) if arg_dtype = = dtype: return False return True return create_op_support(_decline_if_input_dtype) @classmethod def decline_if_node_in_names(cls, disallow_set: set[str]) -> OperatorSupportBase: def _decline_if_node_in_names(submodules: t.Mapping[str, torch.nn.Module], node: torch.fx.Node) -> bool: return node.name not in disallow_set return create_op_support(_decline_if_node_in_names)", - "docstring": "A set of atomic instances that can be combined together to form more complex operator support logic.", - "type": "class", - "file_path": "pytorch\\torch\\fx\\passes\\operator_support.py", - "ast_data": "ClassDef name:OpSupports Call call:compatibility FunctionDef name:decline_if_input_dtype arguments arg:cls arg:dtype type:torch.dtype FunctionDef name:_decline_if_input_dtype arguments arg:submodules type:t.Mapping[str, torch.nn.Module] arg:node type:torch.fx.Node For Assign Call call:_get_arg_dtype If Compare op:Eq Return return:yes Return return:yes Return return:yes FunctionDef name:decline_if_node_in_names arguments arg:cls arg:disallow_set type:set[str] FunctionDef name:_decline_if_node_in_names arguments arg:submodules type:t.Mapping[str, torch.nn.Module] arg:node type:torch.fx.Node Return return:yes Return return:yes" - }, - { - "library": "coconut", - "name": "paren_join", - "source_code": "def paren_join(items, sep): return items[0] if len(items) = = 1 else '(' + (') ' + sep + ' (').join(items) + ')'", - "docstring": "Join items by sep with parens around individual items but not the whole.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:paren_join arguments arg:items arg:sep Return return:yes" - }, - { - "library": "cherrypy", - "name": "header_elements", - "source_code": "def header_elements(fieldname, fieldvalue): if not fieldvalue: return [] result = [] for element in RE_HEADER_SPLIT.split(fieldvalue): if fieldname.startswith('Accept') or fieldname = = 'TE': hv = AcceptElement.from_str(element) else: hv = HeaderElement.from_str(element) result.append(hv) return list(reversed(sorted(result)))", - "docstring": "Return a sorted :class: list. 
Constucted from a comma-separated header string.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", - "ast_data": "FunctionDef name:header_elements arguments arg:fieldname arg:fieldvalue If Return return:yes Assign For Call call:split If BoolOp Call call:startswith Compare op:Eq Assign Call call:from_str Assign Call call:from_str Return return:yes" - }, - { - "library": "tensorflow", - "name": "add_run_metadata", - "source_code": "def add_run_metadata(self, run_metadata, tag, global_step = None): if tag in self._session_run_tags: raise ValueError('The provided tag was already used for this event type') self._session_run_tags[tag] = True tagged_metadata = event_pb2.TaggedRunMetadata() tagged_metadata.tag = tag tagged_metadata.run_metadata = run_metadata.SerializeToString() event = event_pb2.Event(tagged_run_metadata = tagged_metadata) self._add_event(event, global_step)", - "docstring": "Adds a metadata information for a single session.run() call. Args: run_metadata: A protobuf object. tag: The tag name for this metadata. global_step: Number. Optional global step counter to record with the StepStats. Raises: ValueError: If the provided tag was already used for this type of event.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\writer.py", - "ast_data": "FunctionDef name:add_run_metadata arguments arg:self arg:run_metadata arg:tag arg:global_step If Compare op:In Raise raises:ValueError('The provided tag was already used for this event type') Assign Assign Call call:TaggedRunMetadata Assign Assign Call call:SerializeToString Assign Call call:Event" - }, - { - "library": "pandas", - "name": "last", - "source_code": "@final def last(self, numeric_only: bool = False, min_count: int = -1, skipna: bool = True) -> NDFrameT: def last_compat(obj: NDFrameT): def last(x: Series): arr = x.array[notna(x.array)] if not len(arr): return x.array.dtype.na_value return arr[-1] if isinstance(obj, DataFrame): return obj.apply(last) elif isinstance(obj, Series): return last(obj) else: raise TypeError(type(obj)) return self._agg_general(numeric_only = numeric_only, min_count = min_count, alias = 'last', npfunc = last_compat, skipna = skipna)", - "docstring": "Compute the last entry of each column within each group. Defaults to skipping NA elements. Parameters ---------- numeric_only : bool, default False Include only float, int, boolean columns. If None, will attempt to use everything, then use only numeric data. min_count : int, default -1 The required number of valid values to perform the operation. If fewer than `` valid values are present the result will be NA. skipna : bool, default True Exclude NA/null values. If an entire group is NA, the result will be NA. .. versionadded:: 2.2.1 Returns ------- Series or DataFrame Last of values within each group. See Also -------- DataFrame.groupby : Apply a function groupby to each row or column of a DataFrame. core.groupby.DataFrameGroupBy.first : Compute the first non-null entry of each column. core.groupby.DataFrameGroupBy.nth : Take the nth row from each group. 
Examples -------- >>> df = pd.DataFrame(dict(A=[1, 1, 3], B=[5, None, 6], C=[1, 2, 3])) >>> df.groupby(\"A\").last() B C A 1 5.0 2 3 6.0 3", - "type": "method", - "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", - "ast_data": "FunctionDef name:last arguments arg:self arg:numeric_only type:bool arg:min_count type:int arg:skipna type:bool FunctionDef name:last_compat arguments arg:obj type:NDFrameT FunctionDef name:last arguments arg:x type:Series Assign If Return return:yes Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Raise raises:TypeError(type(obj)) Return return:yes" - }, - { - "library": "scikit-learn", - "name": "clone", - "source_code": "def clone(estimator, *, safe = True): if hasattr(estimator, '__sklearn_clone__') and (not inspect.isclass(estimator)): return estimator.__sklearn_clone__() return _clone_parametrized(estimator, safe = safe)", - "docstring": "Construct a new unfitted estimator with the same parameters. Clone does a deep copy of the model in an estimator without actually copying attached data. It returns a new estimator with the same parameters that has not been fitted on any data. .. versionchanged:: 1.3 Delegates to if the method exists. Parameters ---------- estimator : {list, tuple, set} of estimator instance or a single estimator instance The estimator or group of estimators to be cloned. safe : bool, default=True If safe is False, clone will fall back to a deep copy on objects that are not estimators. Ignored if exists. Returns ------- estimator : object The deep copy of the input, an estimator if input is an estimator. Notes ----- If the estimator's parameter is an integer (or if the estimator doesn't have a parameter), an *exact clone* is returned: the clone and the original estimator will give the exact same results. Otherwise, *statistical clone* is returned: the clone might return different results from the original estimator. More details can be found in :ref:. 
Examples -------- >>> from sklearn.base import clone >>> from sklearn.linear_model import LogisticRegression >>> X = [[-1, 0], [0, 1], [0, -1], [1, 0]] >>> y = [0, 0, 1, 1] >>> classifier = LogisticRegression().fit(X, y) >>> cloned_classifier = clone(classifier) >>> hasattr(classifier, \"classes_\") True >>> hasattr(cloned_classifier, \"classes_\") False >>> classifier is cloned_classifier False", - "type": "function", - "file_path": "scikit-learn\\sklearn\\base.py", - "ast_data": "FunctionDef name:clone arguments arg:estimator If BoolOp Call call:hasattr Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "Statement", - "source_code": "class Statement(Reference): def __init__(self, template, **parts): self.template = template self.parts = parts def references_table(self, table): return any((hasattr(part, 'references_table') and part.references_table(table) for part in self.parts.values())) def references_column(self, table, column): return any((hasattr(part, 'references_column') and part.references_column(table, column) for part in self.parts.values())) def references_index(self, table, index): return any((hasattr(part, 'references_index') and part.references_index(table, index) for part in self.parts.values())) def rename_table_references(self, old_table, new_table): for part in self.parts.values(): if hasattr(part, 'rename_table_references'): part.rename_table_references(old_table, new_table) def rename_column_references(self, table, old_column, new_column): for part in self.parts.values(): if hasattr(part, 'rename_column_references'): part.rename_column_references(table, old_column, new_column) def __str__(self): return self.template % self.parts", - "docstring": "Statement template and formatting parameters container. 
Allows keeping a reference to a statement without interpolating identifiers that might have to be adjusted if they're referencing a table or column that is removed", - "type": "class", - "file_path": "django\\django\\db\\backends\\ddl_references.py", - "ast_data": "ClassDef name:Statement FunctionDef name:__init__ arguments arg:self arg:template kwarg:parts Assign Assign FunctionDef name:references_table arguments arg:self arg:table Return return:yes FunctionDef name:references_column arguments arg:self arg:table arg:column Return return:yes FunctionDef name:references_index arguments arg:self arg:table arg:index Return return:yes FunctionDef name:rename_table_references arguments arg:self arg:old_table arg:new_table For Call call:values If Call call:hasattr FunctionDef name:rename_column_references arguments arg:self arg:table arg:old_column arg:new_column For Call call:values If Call call:hasattr FunctionDef name:__str__ arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "graph_execution_trace_to_tensor_id", - "source_code": "def graph_execution_trace_to_tensor_id(self, trace): return self.symbolic_tensor_id(trace.graph_id, trace.op_name, trace.output_slot)", - "docstring": "Get symbolic tensor ID from a GraphExecutoinTraceDigest object.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", - "ast_data": "FunctionDef name:graph_execution_trace_to_tensor_id arguments arg:self arg:trace Return return:yes" - }, - { - "library": "sphinx", - "name": "members_of", - "source_code": "def members_of(obj: Any, *, config: Config) -> Sequence[str]: if config.autosummary_ignore_module_all: return dir(obj) else: if (obj___all__: = getall(obj)) is not None: return obj___all__ return dir(obj)", - "docstring": "Get the members of `` setting.", - "type": "function", - "file_path": "sphinx\\sphinx\\ext\\autosummary\\generate.py", - "ast_data": "FunctionDef name:members_of arguments arg:obj type:Any If Return return:yes If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "diag", - "source_code": "def diag(self, X): return np.apply_along_axis(self, 1, X).ravel()", - "docstring": "Returns the diagonal of the kernel k(X, X). The result of this method is identical to np.diag(self(X)); however, it can be evaluated more efficiently since only the diagonal is evaluated. Parameters ---------- X : ndarray of shape (n_samples_X, n_features) Left argument of the returned kernel k(X, Y) Returns ------- K_diag : ndarray of shape (n_samples_X,) Diagonal of kernel k(X, X)", - "type": "method", - "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", - "ast_data": "FunctionDef name:diag arguments arg:self arg:X Return return:yes" - }, - { - "library": "numpy", - "name": "get_masked_subclass", - "source_code": "def get_masked_subclass(*arrays): if len(arrays) = = 1: arr = arrays[0] if isinstance(arr, MaskedArray): rcls = type(arr) else: rcls = MaskedArray else: arrcls = [type(a) for a in arrays] rcls = arrcls[0] if not issubclass(rcls, MaskedArray): rcls = MaskedArray for cls in arrcls[1:]: if issubclass(cls, rcls): rcls = cls if rcls.__name__ = = 'MaskedConstant': return MaskedArray return rcls", - "docstring": "Return the youngest subclass of MaskedArray from a list of (masked) arrays. 
In case of siblings, the first listed takes over.", - "type": "function", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:get_masked_subclass arguments vararg:arrays If Compare op:Eq Assign If Call call:isinstance Assign Call call:type Assign Assign Assign If Assign For If Call call:issubclass Assign If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_alpha", - "source_code": "def set_alpha(self, alpha): martist.Artist._set_alpha_for_array(self, alpha) if np.ndim(alpha) not in (0, 2): raise TypeError('alpha must be a float, two-dimensional array, or None') self._imcache = None", - "docstring": "Set the alpha value used for blending - not supported on all backends. Parameters ---------- alpha : float or 2D array-like or None", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\image.py", - "ast_data": "FunctionDef name:set_alpha arguments arg:self arg:alpha If Compare op:NotIn Raise raises:TypeError('alpha must be a float, two-dimensional array, or None') Assign" - }, - { - "library": "scikit-learn", - "name": "PositiveSpectrumWarning", - "source_code": "class PositiveSpectrumWarning(UserWarning): pass", - "docstring": "Warning raised when the eigenvalues of a PSD matrix have issues This warning is typically raised by `` when the eigenvalues of a positive semidefinite (PSD) matrix such as a gram matrix (kernel) present significant negative eigenvalues, or bad conditioning i.e. very small non-zero eigenvalues compared to the largest eigenvalue. .. versionadded:: 0.22", - "type": "class", - "file_path": "scikit-learn\\sklearn\\exceptions.py", - "ast_data": "ClassDef name:PositiveSpectrumWarning" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, max_shard_bytes, max_shards = None, bytes_per_string = 16): if max_shard_bytes < 1: raise ValueError(f'Argument `max_shard_bytes` must be positive. Received {max_shard_bytes}') if max_shards and max_shards < 1: raise ValueError(f'Argument `max_shards` must be positive. Received {max_shards}') if bytes_per_string < 1: raise ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}') self._max_shard_bytes = max_shard_bytes self._max_shards = max_shards self._bytes_per_string = bytes_per_string", - "docstring": "Creates a new . Args: max_shard_bytes: The maximum size any given shard is allowed to be. max_shards: The maximum number of shards in created taking precedence over . bytes_per_string: If the partition value is of type string, this provides an estimate of how large each string is.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\sharded_variable.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:max_shard_bytes arg:max_shards arg:bytes_per_string If Compare op:Lt Raise raises:ValueError(f'Argument `max_shard_bytes` must be positive. Received {max_shard_bytes}') If BoolOp Compare op:Lt Raise raises:ValueError(f'Argument `max_shards` must be positive. Received {max_shards}') If Compare op:Lt Raise raises:ValueError(f'Argument `bytes_per_string` must be positive. Received: {bytes_per_string}') Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "LSTMStateTuple", - "source_code": "@tf_export(v1 = ['nn.rnn_cell.LSTMStateTuple']) class LSTMStateTuple(_LSTMStateTuple): __slots__ = () @property def dtype(self): c, h = self if c.dtype ! 
= h.dtype: raise TypeError('Inconsistent internal state: %s vs %s' % (str(c.dtype), str(h.dtype))) return c.dtype", - "docstring": "Tuple used by LSTM Cells for , , and output state. Stores two elements: , in that order. Where is the hidden state and is the output. Only used when .", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", - "ast_data": "ClassDef name:LSTMStateTuple Call call:tf_export Assign FunctionDef name:dtype arguments arg:self Assign If Compare op:NotEq Raise raises:TypeError('Inconsistent internal state: %s vs %s' % (str(c.dtype), str(h.dtype))) Return return:yes" - }, - { - "library": "pytorch", - "name": "check_file", - "source_code": "def check_file(filename: str, test_globs: list[str] = CPP_TEST_GLOBS) -> list[LintMessage]: lint_messages: list[LintMessage] = [] symbols: dict[str, int] = {} with open(filename) as f: for idx, line in enumerate(f): symbol = line.strip() if not symbol or symbol[0] = = '#': continue symbols[symbol] = idx + 1 symbols_regex = re.compile('|'.join(sorted(symbols.keys(), reverse = True))) matched_symbols = find_matched_symbols(symbols_regex, test_globs) for s, lineno in symbols.items(): if s not in matched_symbols: lint_messages.append(LintMessage(path = filename, line = lineno, char = None, code = LINTER_CODE, severity = LintSeverity.ERROR, name = '[untested-symbol]', original = None, replacement = None, description = f\"{s} has been included as a header-only API but is not tested in any of CPP_TEST_GLOBS, which contains {CPP_TEST_GLOBS}.\\nPlease add a .cpp test using the symbol without linking anything to verify that the symbol is in fact header-only. If you already have a test but it's not found, please add the .cpp file to CPP_TEST_GLOBS in tools/linters/adapters/header_only_linter.py.\")) return lint_messages", - "docstring": "Goes through the header_only_apis.txt file and verifies that all symbols within the file can be found tested in an appropriately independent .cpp file. Note that we expect CPP_TEST_GLOBS to be passed in as test_globs--the only reason this is an argument at all is for ease of testing.", - "type": "function", - "file_path": "pytorch\\tools\\linter\\adapters\\header_only_linter.py", - "ast_data": "FunctionDef name:check_file arguments arg:filename type:str arg:test_globs type:list[str] With For Call call:enumerate Assign Call call:strip If BoolOp Compare op:Eq Assign Assign Call call:compile Assign Call call:find_matched_symbols For Call call:items If Compare op:NotIn Return return:yes" - }, - { - "library": "authlib", - "name": "create_save_token_func", - "source_code": "def create_save_token_func(session, token_model): def save_token(token, request): if request.user: user_id = request.user.get_user_id() else: user_id = None client = request.client item = token_model(client_id = client.client_id, user_id = user_id, **token) session.add(item) session.commit() return save_token", - "docstring": "Create an `` function that can be used in authorization server. 
:param session: SQLAlchemy session :param token_model: Token model class", - "type": "function", - "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py", - "ast_data": "FunctionDef name:create_save_token_func arguments arg:session arg:token_model FunctionDef name:save_token arguments arg:token arg:request If Assign Call call:get_user_id Assign Assign Assign Call call:token_model Return return:yes" - }, - { - "library": "pytorch", - "name": "LogSigmoid", - "source_code": "class LogSigmoid(Module): def forward(self, input: Tensor) -> Tensor: return F.logsigmoid(input)", - "docstring": "Applies the Logsigmoid function element-wise. .. math:: \\text{LogSigmoid}(x) = \\log\\left(\\frac{ 1 }{ 1 + \\exp(-x)}\\right) Shape: - Input: :math:, where :math: means any number of dimensions. - Output: :math:, same shape as the input. .. image:: ../scripts/activation_images/LogSigmoid.png Examples:: >>> m = nn.LogSigmoid() >>> input = torch.randn(2) >>> output = m(input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\activation.py", - "ast_data": "ClassDef name:LogSigmoid FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "mongo", - "name": "BSON", - "source_code": "class BSON(bytes): @classmethod def encode(cls: Type[BSON], document: Mapping[str, Any], check_keys: bool = False, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS) -> BSON: return cls(encode(document, check_keys, codec_options)) def decode(self, codec_options: CodecOptions[Any] = DEFAULT_CODEC_OPTIONS) -> dict[str, Any]: return decode(self, codec_options)", - "docstring": "BSON (Binary JSON) data. .. warning:: Using this class to encode and decode BSON adds a performance cost. For better performance use the module level functions :func: and :func: instead.", - "type": "class", - "file_path": "mongo\\bson\\__init__.py", - "ast_data": "ClassDef name:BSON FunctionDef name:encode arguments arg:cls type:Type[BSON] arg:document type:Mapping[str, Any] arg:check_keys type:bool arg:codec_options type:CodecOptions[Any] Return return:yes FunctionDef name:decode arguments arg:self arg:codec_options type:CodecOptions[Any] Return return:yes" - }, - { - "library": "django", - "name": "add_root_elements", - "source_code": "def add_root_elements(self, handler): pass", - "docstring": "Add elements in the root (i.e. feed/channel) element. Called from write().", - "type": "method", - "file_path": "django\\django\\utils\\feedgenerator.py", - "ast_data": "FunctionDef name:add_root_elements arguments arg:self arg:handler" - }, - { - "library": "scikit-learn", - "name": "plot", - "source_code": "def plot(self, ax = None, *, negate_score = False, score_name = None, score_type = 'both', std_display_style = 'fill_between', line_kw = None, fill_between_kw = None, errorbar_kw = None): self._plot_curve(self.param_range, ax = ax, negate_score = negate_score, score_name = score_name, score_type = score_type, std_display_style = std_display_style, line_kw = line_kw, fill_between_kw = fill_between_kw, errorbar_kw = errorbar_kw) self.ax_.set_xlabel(f'{self.param_name}') return self", - "docstring": "Plot visualization. Parameters ---------- ax : matplotlib Axes, default=None Axes object to plot on. If , a new figure and axes is created. negate_score : bool, default=False Whether or not to negate the scores obtained through :func:. This is particularly useful when using the error denoted by in . 
score_name : str, default=None The name of the score used to decorate the y-axis of the plot. It will override the name inferred from the parameter. If is , we use if is and otherwise. If is a string or a callable, we infer the name. We replace by spaces and capitalize the first letter. We remove and replace it by if is or just remove it otherwise. score_type : {\"test\", \"train\", \"both\"}, default=\"both\" The type of score to plot. Can be one of , , or . std_display_style : {\"errorbar\", \"fill_between\"} or None, default=\"fill_between\" The style used to display the score standard deviation around the mean score. If None, no standard deviation representation is displayed. line_kw : dict, default=None Additional keyword arguments passed to the used to draw the mean score. fill_between_kw : dict, default=None Additional keyword arguments passed to the used to draw the score standard deviation. errorbar_kw : dict, default=None Additional keyword arguments passed to the used to draw mean score and standard deviation score. Returns ------- display : :class: Object that stores computed values.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_plot.py", - "ast_data": "FunctionDef name:plot arguments arg:self arg:ax Return return:yes" - }, - { - "library": "tensorflow", - "name": "round", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def round(x): return math_ops.round(x)", - "docstring": "Element-wise rounding to the closest integer. In case of tie, the rounding mode used is \"half to even\". Args: x: Tensor or variable. Returns: A tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:round arguments arg:x Return return:yes" - }, - { - "library": "pytorch", - "name": "get_lowered_module_name", - "source_code": "def get_lowered_module_name(root: torch.nn.Module, lowered_module: LOWERED_BACKEND_MODULE_TYPE) -> str: qualname = None i = 0 while True: qualname = f'lowered_module_{i}' if not hasattr(root, qualname): break i + = 1 assert qualname is not None root.add_module(qualname, lowered_module) return qualname", - "docstring": "Adds the given lowered_module into the given root module and returns the name of the module added.", - "type": "function", - "file_path": "pytorch\\torch\\_higher_order_ops\\executorch_call_delegate.py", - "ast_data": "FunctionDef name:get_lowered_module_name arguments arg:root type:torch.nn.Module arg:lowered_module type:LOWERED_BACKEND_MODULE_TYPE Assign Assign While Assign If Return return:yes" - }, - { - "library": "authlib", - "name": "generate_key", - "source_code": "@classmethod def generate_key(cls, kty, crv_or_size, options = None, is_private = False): key_cls = cls.JWK_KEY_CLS[kty] return key_cls.generate_key(crv_or_size, options, is_private)", - "docstring": "Generate a Key with the given key type, curve name or bit size. 
:param kty: string of `` :param crv_or_size: curve name or bit size :param options: a dict of other options for Key :param is_private: create a private key or public key :return: Key instance", - "type": "method", - "file_path": "authlib\\authlib\\jose\\rfc7517\\jwk.py", - "ast_data": "FunctionDef name:generate_key arguments arg:cls arg:kty arg:crv_or_size arg:options arg:is_private Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "source", - "source_code": "def source(self, *args, **kwargs): print(self._source(*args))", - "docstring": "Print source code for the function corresponding to inputs", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py", - "ast_data": "FunctionDef name:source arguments arg:self vararg:args kwarg:kwargs" - }, - { - "library": "scipy", - "name": "trrad", - "source_code": "def trrad(delta_in, dnorm, eta1, eta2, gamma1, gamma2, ratio): if DEBUGGING: assert delta_in > = dnorm > 0 assert 0 < = eta1 < = eta2 < 1 assert 0 < gamma1 < 1 < gamma2 assert not np.isnan(ratio) if ratio < = eta1: delta = gamma1 * dnorm elif ratio < = eta2: delta = max(gamma1 * delta_in, dnorm) else: delta = max(gamma1 * delta_in, gamma2 * dnorm) if DEBUGGING: assert delta > 0 return delta", - "docstring": "This function updates the trust region radius according to RATIO and DNORM.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\cobyla\\trustregion.py", - "ast_data": "FunctionDef name:trrad arguments arg:delta_in arg:dnorm arg:eta1 arg:eta2 arg:gamma1 arg:gamma2 arg:ratio If If Compare op:LtE Assign If Compare op:LtE Assign Call call:max Assign Call call:max If Return return:yes" - }, - { - "library": "flexx", - "name": "add_asset", - "source_code": "def add_asset(self, a): if not isinstance(a, Asset): raise TypeError('Bundles.add_asset() needs an Asset, not %s.' % a.__class__.__name__) if isinstance(a, Bundle): raise TypeError('Bundles can contain assets and modules, but not bundles.') self._assets.append(a)", - "docstring": "Add an asset to the bundle. Assets added this way occur before the code for the modules in this bundle.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_asset.py", - "ast_data": "FunctionDef name:add_asset arguments arg:self arg:a If Raise raises:TypeError('Bundles.add_asset() needs an Asset, not %s.' % a.__class__.__name__) If Call call:isinstance Raise raises:TypeError('Bundles can contain assets and modules, but not bundles.')" - }, - { - "library": "scipy", - "name": "DenseOutput", - "source_code": "class DenseOutput: def __init__(self, t_old, t): self.t_old = t_old self.t = t self.t_min = min(t, t_old) self.t_max = max(t, t_old) def __call__(self, t): t = np.asarray(t) if t.ndim > 1: raise ValueError('`t` must be a float or a 1-D array.') return self._call_impl(t) def _call_impl(self, t): raise NotImplementedError", - "docstring": "Base class for local interpolant over step made by an ODE solver. It interpolates between and (see Attributes below). Evaluation outside this interval is not forbidden, but the accuracy is not guaranteed. 
Attributes ---------- t_min, t_max : float Time range of the interpolation.", - "type": "class", - "file_path": "scipy\\scipy\\integrate\\_ivp\\base.py", - "ast_data": "ClassDef name:DenseOutput FunctionDef name:__init__ arguments arg:self arg:t_old arg:t Assign Assign Assign Call call:min Assign Call call:max FunctionDef name:__call__ arguments arg:self arg:t Assign Call call:asarray If Compare op:Gt Raise raises:ValueError('`t` must be a float or a 1-D array.') Return return:yes FunctionDef name:_call_impl arguments arg:self arg:t Raise raises:NotImplementedError" - }, - { - "library": "numpy", - "name": "swapaxes", - "source_code": "@array_function_dispatch(_swapaxes_dispatcher) def swapaxes(a, axis1, axis2): return _wrapfunc(a, 'swapaxes', axis1, axis2)", - "docstring": "Interchange two axes of an array. Parameters ---------- a : array_like Input array. axis1 : int First axis. axis2 : int Second axis. Returns ------- a_swapped : ndarray For NumPy >= 1.10.0, if is an ndarray, then a view of is returned; otherwise a new array is created. For earlier NumPy versions a view of is returned only if the order of the axes is changed, otherwise the input array is returned. Examples -------- >>> import numpy as np >>> x = np.array([[1,2,3]]) >>> np.swapaxes(x,0,1) array([[1], [2], [3]]) >>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]]) >>> x array([[[0, 1], [2, 3]], [[4, 5], [6, 7]]]) >>> np.swapaxes(x,0,2) array([[[0, 4], [2, 6]], [[1, 5], [3, 7]]])", - "type": "function", - "file_path": "numpy\\numpy\\_core\\fromnumeric.py", - "ast_data": "FunctionDef name:swapaxes arguments arg:a arg:axis1 arg:axis2 Call call:array_function_dispatch Return return:yes" - }, - { - "library": "scipy", - "name": "correlation", - "source_code": "def correlation(u, v, w = None, centered = True): u = _validate_vector(u) v = _validate_vector(v) if np.iscomplexobj(u) or np.iscomplexobj(v): message = 'Complex `u` and `v` are deprecated and will raise an error in SciPy 1.17.0.' warnings.warn(message, DeprecationWarning, stacklevel = 2) if w is not None: w = _validate_weights(w) w = w / w.sum() if centered: if w is not None: umu = np.dot(u, w) vmu = np.dot(v, w) else: umu = np.mean(u) vmu = np.mean(v) u = u - umu v = v - vmu if w is not None: vw = v * w uw = u * w else: vw, uw = (v, u) uv = np.dot(u, vw) uu = np.dot(u, uw) vv = np.dot(v, vw) dist = 1.0 - uv / math.sqrt(uu * vv) return np.clip(dist, 0.0, 2.0)", - "docstring": "Compute the correlation distance between two 1-D arrays. The correlation distance between and , is defined as .. math:: 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})} {{\\|(u - \\bar{u})\\|}_2 {\\|(v - \\bar{v})\\|}_2} where :math: is the mean of the elements of and :math: is the dot product of :math: and :math:. Parameters ---------- u : (N,) array_like of floats Input array. .. deprecated:: 1.15.0 Complex is deprecated and will raise an error in SciPy 1.17.0 v : (N,) array_like of floats Input array. .. deprecated:: 1.15.0 Complex is deprecated and will raise an error in SciPy 1.17.0 w : (N,) array_like of floats, optional The weights for each value in and . Default is None, which gives each value a weight of 1.0 centered : bool, optional If True, and will be centered. Default is True. Returns ------- correlation : double The correlation distance between 1-D array and . Examples -------- Find the correlation between two arrays. 
>>> from scipy.spatial.distance import correlation >>> correlation([1, 0, 1], [1, 1, 0]) 1.5 Using a weighting array, the correlation can be calculated as: >>> correlation([1, 0, 1], [1, 1, 0], w=[0.9, 0.1, 0.1]) 1.1 If centering is not needed, the correlation can be calculated as: >>> correlation([1, 0, 1], [1, 1, 0], centered=False) 0.5", - "type": "function", - "file_path": "scipy\\scipy\\spatial\\distance.py", - "ast_data": "FunctionDef name:correlation arguments arg:u arg:v arg:w arg:centered Assign Call call:_validate_vector Assign Call call:_validate_vector If BoolOp Call call:iscomplexobj Call call:iscomplexobj Assign If Compare op:IsNot Assign Call call:_validate_weights Assign If If Compare op:IsNot Assign Call call:dot Assign Call call:dot Assign Call call:mean Assign Call call:mean Assign Assign If Compare op:IsNot Assign Assign Assign Assign Call call:dot Assign Call call:dot Assign Call call:dot Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_lr", - "source_code": "@override def get_lr(self) -> list[float]: _warn_get_lr_called_within_step(self) if self.last_epoch not in self.milestones: return [group['lr'] for group in self.optimizer.param_groups] return [group['lr'] * self.gamma ** self.milestones[self.last_epoch] for group in self.optimizer.param_groups]", - "docstring": "Compute the learning rate of each parameter group.", - "type": "method", - "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", - "ast_data": "FunctionDef name:get_lr arguments arg:self If Compare op:NotIn Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "visualize", - "source_code": "def visualize(self, images: Union[Tensor, List[Tensor]], edge_maps: Optional[Union[Tensor, List[Tensor]]] = None, output_type: str = 'torch') -> Union[Tensor, List[Tensor], List['Image.Image']]: if edge_maps is None: edge_maps = self.forward(images) output = [] for edge_map in edge_maps: output.append(edge_map) return self._tensor_to_type(output, output_type, is_batch = isinstance(images, Tensor))", - "docstring": "Draw the super resolution results. Args: images: input tensor. edge_maps: detected edges. output_type: type of the output. Returns: output tensor.", - "type": "method", - "file_path": "kornia\\kornia\\models\\super_resolution\\base.py", - "ast_data": "FunctionDef name:visualize arguments arg:self arg:images type:Union[Tensor, List[Tensor]] arg:edge_maps type:Optional[Union[Tensor, List[Tensor]]] arg:output_type type:str If Compare op:Is Assign Call call:forward Assign For Return return:yes" - }, - { - "library": "pytorch", - "name": "inline_user_function_return", - "source_code": "def inline_user_function_return(self, fn, args, kwargs): if config.enable_faithful_generator_behavior and is_generator(fn.get_code()): return self.inline_generator_function(fn, args, kwargs) else: return InliningInstructionTranslator.inline_call(self, fn, args, kwargs)", - "docstring": "A call to some user defined function by inlining it.", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\symbolic_convert.py", - "ast_data": "FunctionDef name:inline_user_function_return arguments arg:self arg:fn arg:args arg:kwargs If BoolOp Call call:is_generator Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_memory_growth", - "source_code": "@tf_export('config.experimental.set_memory_growth') def set_memory_growth(device, enable): context.context().set_memory_growth(device, enable)", - "docstring": "Set if memory growth should be enabled for a . 
If memory growth is enabled for a , the runtime initialization will not allocate all memory on the device. Memory growth cannot be configured on a with virtual devices configured. For example: >>> physical_devices = tf.config.list_physical_devices('GPU') >>> try: ... tf.config.experimental.set_memory_growth(physical_devices[0], True) ... except: ... # Invalid device or cannot modify virtual devices once initialized. ... pass Args: device: to configure enable: (Boolean) Whether to enable or disable memory growth Raises: ValueError: Invalid specified. RuntimeError: Runtime is already initialized.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\config.py", - "ast_data": "FunctionDef name:set_memory_growth arguments arg:device arg:enable Call call:tf_export" - }, - { - "library": "pytorch", - "name": "row_or_column_stride", - "source_code": "def row_or_column_stride(self, node: IRNode, default_value: int = 0) -> str: if node is None or len(node.get_stride()) < 2: return str(default_value) stride0 = node.get_stride()[-1] stride1 = node.get_stride()[-2] if stride0 = = 1: return cexpr(self.rename_indexing(stride1)) elif stride1 = = 1: return cexpr(self.rename_indexing(stride0)) else: raise RuntimeError(f'At least 1 stride should be 1. Strides: node.get_stride() = {node.get_stride()!r}')", - "docstring": "Hook called from template code to get the row or column stride of an arg. This is required by some CUTLASS 2.X APIs. If the node is in row_major, it returns stride[-2]. If the node is in column_major, it returns stride[-1]. TODO: Will add needed args to pass it in if it is dynamic.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_kernel.py", - "ast_data": "FunctionDef name:row_or_column_stride arguments arg:self arg:node type:IRNode arg:default_value type:int If BoolOp Compare op:Is Compare op:Lt Return return:yes Assign Assign If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Raise raises:RuntimeError(f'At least 1 stride should be 1. Strides: node.get_stride()={node.get_stride()!r}')" - }, - { - "library": "pytorch", - "name": "supported_activities", - "source_code": "def supported_activities(): return torch.autograd._supported_activities()", - "docstring": "Returns a set of supported profiler tracing activities. Note: profiler uses CUPTI library to trace on-device CUDA kernels. In case when CUDA is enabled but CUPTI is not available, passing ``). This, in turn, results in including CUDA time in the profiler table output, but not in the JSON trace.", - "type": "function", - "file_path": "pytorch\\torch\\profiler\\profiler.py", - "ast_data": "FunctionDef name:supported_activities arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "load_nvprof", - "source_code": "def load_nvprof(path): return EventList(parse_nvprof_trace(path))", - "docstring": "Open an nvprof trace file and parses autograd annotations. 
Args: path (str): path to nvprof trace", - "type": "function", - "file_path": "pytorch\\torch\\autograd\\profiler.py", - "ast_data": "FunctionDef name:load_nvprof arguments arg:path Return return:yes" - }, - { - "library": "mongo", - "name": "on_change", - "source_code": "async def on_change(self, server_description: ServerDescription, reset_pool: bool = False, interrupt_connections: bool = False) -> None: async with self._lock: if self._opened and self._description.has_server(server_description.address): await self._process_change(server_description, reset_pool, interrupt_connections) if reset_pool: server = self._servers.get(server_description.address) if server: await server.pool.reset(interrupt_connections = interrupt_connections)", - "docstring": "Process a new ServerDescription after an hello call completes.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\topology.py", - "ast_data": "AsyncFunctionDef name:on_change arguments arg:self arg:server_description type:ServerDescription arg:reset_pool type:bool arg:interrupt_connections type:bool If Assign Call call:get If" - }, - { - "library": "matplotlib", - "name": "set_axis", - "source_code": "def set_axis(self, axis): self._axis = axis", - "docstring": "Select axis. Parameters ---------- axis : {\"both\", \"x\", \"y\"}", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py", - "ast_data": "FunctionDef name:set_axis arguments arg:self arg:axis Assign" - }, - { - "library": "pandas", - "name": "get_node", - "source_code": "def get_node(self, key: str) -> Node | None: self._check_if_open() if not key.startswith('/'): key = '/' + key assert self._handle is not None assert _table_mod is not None try: node = self._handle.get_node(self.root, key) except _table_mod.exceptions.NoSuchNodeError: return None assert isinstance(node, _table_mod.Node), type(node) return node", - "docstring": "return the node with the key or None if it does not exist", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:get_node arguments arg:self arg:key type:str If Assign Try Assign Call call:get_node ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "check_same_dtype", - "source_code": "def check_same_dtype(*args): full_dtype = None scalar_type = None for arg in args: if isinstance(arg, Number): continue elif isinstance(arg, TensorLike): if full_dtype is None: full_dtype = arg.dtype if scalar_type is None: scalar_type = dtype_to_type(arg.dtype) if full_dtype is not arg.dtype: msg = 'Tensor with dtype ' + str(arg.dtype) + ' is not the expected dtype of ' + str(full_dtype) + '!' raise RuntimeError(msg) arg_type = dtype_to_type(arg.dtype) if arg_type is not scalar_type: msg = 'Tensor with corresponding Python type ' + str(arg_type) + ' is not the expected type of ' + str(scalar_type) + '!' raise RuntimeError(msg) else: msg = 'Unexpected type when checking for same dtype, ' + str(type(arg)) + '!' raise RuntimeError(msg)", - "docstring": "Checks that all Tensors in args have the same device and that all Numbers have the same corresponding Python type. 
Raises a RuntimeError when: - args contains an object whose type is not Tensor or Number - two Tensors objects in args have different dtypes - two Number objects in args have different types - there are Tensors and Numbers in args, and one of those Tensors corresponding Python types is different from the type of one of those Numbers", - "type": "function", - "file_path": "pytorch\\torch\\_prims_common\\__init__.py", - "ast_data": "FunctionDef name:check_same_dtype arguments vararg:args Assign Assign For If Call call:isinstance If Call call:isinstance If Compare op:Is Assign If Compare op:Is Assign Call call:dtype_to_type If Compare op:IsNot Assign Raise raises:RuntimeError(msg) Assign Call call:dtype_to_type If Compare op:IsNot Assign Raise raises:RuntimeError(msg) Assign Raise raises:RuntimeError(msg)" - }, - { - "library": "tensorflow", - "name": "calc_control_outputs", - "source_code": "def calc_control_outputs(self, graph): control_outputs = {} for op in graph.get_operations(): for control_input in op.control_inputs: if control_input not in control_outputs: control_outputs[control_input] = set() control_outputs[control_input].add(op) return control_outputs", - "docstring": "Returns the map of control_outputs for a given graph. Args: graph: The graph to parse. Returns: A map of the control outputs.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\subscribe.py", - "ast_data": "FunctionDef name:calc_control_outputs arguments arg:self arg:graph Assign For Call call:get_operations For If Compare op:NotIn Assign Call call:set Return return:yes" - }, - { - "library": "tensorflow", - "name": "split", - "source_code": "def split(tensor, split_dimension, num_devices, assign_tuple_sharding = False, use_sharding_op = False, input_shape = None): return Sharding.split(tensor, split_dimension, num_devices, input_shape).apply_to_tensor(tensor, assign_tuple_sharding = assign_tuple_sharding, use_sharding_op = use_sharding_op)", - "docstring": "Returns a tensor that is split along the given dimension. Args: tensor: A tf.Tensor to split. split_dimension: The dimension to split. num_devices: The number of devices to partition the dimension. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: If true, adds a sharding op to set the sharding. input_shape: The full shape of the input tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", - "ast_data": "FunctionDef name:split arguments arg:tensor arg:split_dimension arg:num_devices arg:assign_tuple_sharding arg:use_sharding_op arg:input_shape Return return:yes" - }, - { - "library": "sphinx", - "name": "is_longtable", - "source_code": "def is_longtable(self) -> bool: return self.row > 30 or 'longtable' in self.classes", - "docstring": "True if and only if table uses longtable environment.", - "type": "method", - "file_path": "sphinx\\sphinx\\writers\\latex.py", - "ast_data": "FunctionDef name:is_longtable arguments arg:self Return return:yes" - }, - { - "library": "algorithms", - "name": "base_to_int", - "source_code": "def base_to_int(str_to_convert, base): digit = {} for ind, char in enumerate(string.digits + string.ascii_uppercase): digit[char] = ind multiplier = 1 res = 0 for char in str_to_convert[: : -1]: res + = digit[char] * multiplier multiplier * = base return res", - "docstring": "Note : You can use int() built-in function instead of this. 
:type str_to_convert: str :type base: int :rtype: int", - "type": "function", - "file_path": "algorithms\\algorithms\\maths\\base_conversion.py", - "ast_data": "FunctionDef name:base_to_int arguments arg:str_to_convert arg:base Assign For Call call:enumerate Assign Assign Assign For Return return:yes" - }, - { - "library": "django", - "name": "dimension", - "source_code": "@property def dimension(self): return capi.get_dims(self.ptr)", - "docstring": "Return 0 for points, 1 for lines, and 2 for surfaces.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", - "ast_data": "FunctionDef name:dimension arguments arg:self Return return:yes" - }, - { - "library": "cherrypy", - "name": "from_str", - "source_code": "@classmethod def from_str(cls, elementstr): ival, params = cls.parse(elementstr) return cls(ival, params)", - "docstring": "Construct an instance from a string of the form 'token;key=val'.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", - "ast_data": "FunctionDef name:from_str arguments arg:cls arg:elementstr Assign Call call:parse Return return:yes" - }, - { - "library": "scipy", - "name": "getValue", - "source_code": "def getValue(self): return self.data.item()", - "docstring": "Retrieve a scalar value from a of length one. Raises ------ ValueError If the netcdf variable is an array of length greater than one, this exception will be raised.", - "type": "method", - "file_path": "scipy\\scipy\\io\\_netcdf.py", - "ast_data": "FunctionDef name:getValue arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "get_rel_pos", - "source_code": "def get_rel_pos(q_size: int, k_size: int, rel_pos: Tensor) -> Tensor: max_rel_dist = int(2 * max(q_size, k_size) - 1) if rel_pos.shape[0] ! = max_rel_dist: rel_pos_resized = F.interpolate(rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1), size = max_rel_dist, mode = 'linear') rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0) else: rel_pos_resized = rel_pos q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0) k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0) relative_coords = q_coords - k_coords + (k_size - 1) * max(q_size / k_size, 1.0) return rel_pos_resized[relative_coords.long()]", - "docstring": "Get relative positional embeddings according to the relative positions of query and key sizes. Args: q_size: size of query q. k_size: size of key k. rel_pos: relative position embeddings (L, C). 
Returns: Extracted positional embeddings according to relative positions.", - "type": "function", - "file_path": "kornia\\kornia\\contrib\\models\\sam\\architecture\\image_encoder.py", - "ast_data": "FunctionDef name:get_rel_pos arguments arg:q_size type:int arg:k_size type:int arg:rel_pos type:Tensor Assign Call call:int If Compare op:NotEq Assign Call call:interpolate Assign Call call:permute Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "report_uninitialized_resources", - "source_code": "def report_uninitialized_resources(resource_list = None, name = 'report_uninitialized_resources'): if resource_list is None: resource_list = shared_resources() + local_resources() with ops.name_scope(name): local_device = os.environ.get('TF_DEVICE_FOR_UNINITIALIZED_VARIABLE_REPORTING', '/cpu: 0') with ops.device(local_device): if not resource_list: return array_ops.constant([], dtype = dtypes.string) variables_mask = math_ops.logical_not(array_ops_stack.stack([r.is_initialized for r in resource_list])) variable_names_tensor = array_ops.constant([s.handle.name for s in resource_list]) return array_ops.boolean_mask(variable_names_tensor, variables_mask)", - "docstring": "Returns the names of all uninitialized resources in resource_list. If the returned tensor is empty then all resources have been initialized. Args: resource_list: resources to check. If None, will use shared_resources() + local_resources(). name: name for the resource-checking op. Returns: Tensor containing names of the handles of all resources which have not yet been initialized.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resources.py", - "ast_data": "FunctionDef name:report_uninitialized_resources arguments arg:resource_list arg:name If Compare op:Is Assign With Assign Call call:get With If Return return:yes Assign Call call:logical_not Assign Call call:constant Return return:yes" - }, - { - "library": "pygame", - "name": "empty", - "source_code": "def empty(self): for sprite in self.sprites(): self.remove_internal(sprite) sprite.remove_internal(self)", - "docstring": "remove all sprites Group.empty(): return None Removes all the sprites from the group.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:empty arguments arg:self For Call call:sprites" - }, - { - "library": "django", - "name": "build_lookup", - "source_code": "def build_lookup(self, lookups, lhs, rhs): *transforms, lookup_name = lookups or ['exact'] for name in transforms: lhs = self.try_transform(lhs, name, lookups) lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: lhs = self.try_transform(lhs, lookup_name) lookup_name = 'exact' lookup_class = lhs.get_lookup(lookup_name) if not lookup_class: return lookup = lookup_class(lhs, rhs) if lookup.rhs is None and (not lookup.can_use_none_as_rhs): if lookup_name not in ('exact', 'iexact'): raise ValueError('Cannot use None as a query value') return lhs.get_lookup('isnull')(lhs, True) if lookup_name = = 'exact' and lookup.rhs = = '' and connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls: return lhs.get_lookup('isnull')(lhs, True) return lookup", - "docstring": "Try to extract transforms and lookup from given lhs. The lhs value is something that works like SQLExpression. The rhs value is what the lookup is going to compare against. 
The lookups is a list of names to extract using get_lookup() and get_transform().", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\query.py", - "ast_data": "FunctionDef name:build_lookup arguments arg:self arg:lookups arg:lhs arg:rhs Assign BoolOp For Assign Call call:try_transform Assign Call call:get_lookup If Assign Call call:try_transform Assign Assign Call call:get_lookup If Return return:no Assign Call call:lookup_class If BoolOp Compare op:Is If Compare op:NotIn Raise raises:ValueError('Cannot use None as a query value') Return return:yes If BoolOp Compare op:Eq Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "sphinx", - "name": "get_ratio", - "source_code": "def get_ratio(old: str, new: str) -> float: if not all([old, new]): return VERSIONING_RATIO if IS_SPEEDUP: return Levenshtein.distance(old, new) / (len(old) / 100.0) else: return levenshtein_distance(old, new) / (len(old) / 100.0)", - "docstring": "Return a \"similarity ratio\" (in percent) representing the similarity between the two strings where 0 is equal and anything above less than equal.", - "type": "function", - "file_path": "sphinx\\sphinx\\versioning.py", - "ast_data": "FunctionDef name:get_ratio arguments arg:old type:str arg:new type:str If Return return:yes If Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "insert_many", - "source_code": "@_csot.apply def insert_many(self, documents: Iterable[Union[_DocumentType, RawBSONDocument]], ordered: bool = True, bypass_document_validation: Optional[bool] = None, session: Optional[ClientSession] = None, comment: Optional[Any] = None) -> InsertManyResult: if not isinstance(documents, abc.Iterable) or isinstance(documents, abc.Mapping) or (not documents): raise TypeError('documents must be a non-empty list') inserted_ids: list[ObjectId] = [] def gen() -> Iterator[tuple[int, Mapping[str, Any]]]: for document in documents: common.validate_is_document_type('document', document) if not isinstance(document, RawBSONDocument): if '_id' not in document: document['_id'] = ObjectId() inserted_ids.append(document['_id']) yield (message._INSERT, document) write_concern = self._write_concern_for(session) blk = _Bulk(self, ordered, bypass_document_validation, comment = comment) blk.ops = list(gen()) blk.execute(write_concern, session, _Op.INSERT) return InsertManyResult(inserted_ids, write_concern.acknowledged)", - "docstring": "Insert an iterable of documents. >>> db.test.count_documents({}) 0 >>> result = db.test.insert_many([{'x': i} for i in range(2)]) >>> result.inserted_ids [ObjectId('54f113fffba522406c9cc20e'), ObjectId('54f113fffba522406c9cc20f')] >>> db.test.count_documents({}) 2 :param documents: A iterable of documents to insert. :param ordered: If `~pymongo.client_session.ClientSession~pymongo.results.InsertManyResultwrites-and-idsbypass_document_validation` parameter. .. versionchanged:: 3.2 Added bypass_document_validation support .. 
versionadded:: 3.0", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\collection.py", - "ast_data": "FunctionDef name:insert_many arguments arg:self arg:documents type:Iterable[Union[_DocumentType, RawBSONDocument]] arg:ordered type:bool arg:bypass_document_validation type:Optional[bool] arg:session type:Optional[ClientSession] arg:comment type:Optional[Any] If BoolOp Call call:isinstance Raise raises:TypeError('documents must be a non-empty list') FunctionDef name:gen arguments For If If Compare op:NotIn Assign Call call:ObjectId Assign Call call:_write_concern_for Assign Call call:_Bulk Assign Call call:list Return return:yes" - }, - { - "library": "flexx", - "name": "selected", - "source_code": "@event.emitter def selected(self, filename): return {'filename': filename}", - "docstring": "Emitter that fires when the user selects a file. The emitted event has a \"filename\" attribute.", - "type": "method", - "file_path": "flexx\\flexx\\ui\\pywidgets\\_filebrowser.py", - "ast_data": "FunctionDef name:selected arguments arg:self arg:filename Return return:yes" - }, - { - "library": "django", - "name": "process_response", - "source_code": "def process_response(self, request, response): if hasattr(request, '_messages'): unstored_messages = request._messages.update(response) if unstored_messages and settings.DEBUG: raise ValueError('Not all temporary messages could be stored.') return response", - "docstring": "Update the storage backend (i.e., save the messages). Raise ValueError if not all messages could be stored and DEBUG is True.", - "type": "method", - "file_path": "django\\django\\contrib\\messages\\middleware.py", - "ast_data": "FunctionDef name:process_response arguments arg:self arg:request arg:response If Call call:hasattr Assign Call call:update If BoolOp Raise raises:ValueError('Not all temporary messages could be stored.') Return return:yes" - }, - { - "library": "mongo", - "name": "server_descriptions", - "source_code": "def server_descriptions(self) -> dict[_Address, ServerDescription]: return self._server_descriptions.copy()", - "docstring": "dict of (address, :class:).", - "type": "method", - "file_path": "mongo\\pymongo\\topology_description.py", - "ast_data": "FunctionDef name:server_descriptions arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "compile_mps_shader", - "source_code": "def compile_mps_shader(source: str) -> Any: try: return torch.mps.compile_shader(source) except SyntaxError as err: raise SyntaxError(f'failed to compile {source} with {err.msg}') from err", - "docstring": "Compiles shader source but raise more actionable error message when needed", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\runtime\\runtime_utils.py", - "ast_data": "FunctionDef name:compile_mps_shader arguments arg:source type:str Try Return return:yes ExceptHandler Raise raises:SyntaxError(f'failed to compile {source} with {err.msg}')" - }, - { - "library": "tensorflow", - "name": "children", - "source_code": "@classmethod def children(cls, obj, save_type = base.SaveType.CHECKPOINT, **kwargs): obj._maybe_initialize_trackable() children = {} for name, ref in obj._trackable_children(save_type, **kwargs).items(): ref = converter.convert_to_trackable(ref, parent = obj) children[name] = ref return children", - "docstring": "Returns all child trackables attached to obj. Args: obj: A object. save_type: A string, can be 'savedmodel' or 'checkpoint'. **kwargs: kwargs to use when retrieving the object's children. 
Returns: Dictionary of all children attached to the object with name to trackable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\trackable_view.py", - "ast_data": "FunctionDef name:children arguments arg:cls arg:obj arg:save_type kwarg:kwargs Assign For Call call:items Assign Call call:convert_to_trackable Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "export", - "source_code": "def export(fn): fn._torchscript_modifier = FunctionModifiers.EXPORT return fn", - "docstring": "This decorator indicates that a method on an `ScriptModuleforward@torch.jit.exportimplicitly_compiled_methodmforwardanother_forwardimplicitly_compiled_methodunused_method@torch.jit.export` m = torch.jit.script(MyModule())", - "type": "function", - "file_path": "pytorch\\torch\\_jit_internal.py", - "ast_data": "FunctionDef name:export arguments arg:fn Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "ToolCursorPosition", - "source_code": "class ToolCursorPosition(ToolBase): def __init__(self, *args, **kwargs): self._id_drag = None super().__init__(*args, **kwargs) def set_figure(self, figure): if self._id_drag: self.canvas.mpl_disconnect(self._id_drag) super().set_figure(figure) if figure: self._id_drag = self.canvas.mpl_connect('motion_notify_event', self.send_message) def send_message(self, event): if self.toolmanager.messagelock.locked(): return from matplotlib.backend_bases import NavigationToolbar2 message = NavigationToolbar2._mouse_event_to_message(event) self.toolmanager.message_event(message, self)", - "docstring": "Send message with the current pointer position. This tool runs in the background reporting the position of the cursor.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", - "ast_data": "ClassDef name:ToolCursorPosition FunctionDef name:__init__ arguments arg:self vararg:args kwarg:kwargs Assign FunctionDef name:set_figure arguments arg:self arg:figure If If Assign Call call:mpl_connect FunctionDef name:send_message arguments arg:self arg:event If Call call:locked Return return:no Assign Call call:_mouse_event_to_message" - }, - { - "library": "seaborn", - "name": "default_range", - "source_code": "@property def default_range(self) -> tuple[float, float]: base = mpl.rcParams['font.size'] return (base * 0.5, base * 2)", - "docstring": "Min and max values used by default for semantic mapping.", - "type": "method", - "file_path": "seaborn\\seaborn\\_core\\properties.py", - "ast_data": "FunctionDef name:default_range arguments arg:self Assign Return return:yes" - }, - { - "library": "django", - "name": "get_geoms", - "source_code": "def get_geoms(self, geos = False): if geos: from django.contrib.gis.geos import GEOSGeometry return [GEOSGeometry(feat.geom.wkb) for feat in self] else: return [feat.geom for feat in self]", - "docstring": "Return a list containing the OGRGeometry for every Feature in the Layer.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py", - "ast_data": "FunctionDef name:get_geoms arguments arg:self arg:geos If Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "CommandParser", - "source_code": "class CommandParser(ArgumentParser): def __init__(self, *, missing_args_message = None, called_from_command_line = None, **kwargs): self.missing_args_message = missing_args_message self.called_from_command_line = called_from_command_line super().__init__(**kwargs) def parse_args(self, args = None, namespace = None): if 
self.missing_args_message and (not (args or any((not arg.startswith('-') for arg in args)))): self.error(self.missing_args_message) return super().parse_args(args, namespace) def error(self, message): if self.called_from_command_line: super().error(message) else: raise CommandError('Error: %s' % message) def add_subparsers(self, **kwargs): parser_class = kwargs.get('parser_class', type(self)) if issubclass(parser_class, CommandParser): kwargs['parser_class'] = partial(parser_class, called_from_command_line = self.called_from_command_line) return super().add_subparsers(**kwargs)", - "docstring": "Customized ArgumentParser class to improve some error messages and prevent SystemExit in several occasions, as SystemExit is unacceptable when a command is called programmatically.", - "type": "class", - "file_path": "django\\django\\core\\management\\base.py", - "ast_data": "ClassDef name:CommandParser FunctionDef name:__init__ arguments arg:self kwarg:kwargs Assign Assign FunctionDef name:parse_args arguments arg:self arg:args arg:namespace If BoolOp Return return:yes FunctionDef name:error arguments arg:self arg:message If Raise raises:CommandError('Error: %s' % message) FunctionDef name:add_subparsers arguments arg:self kwarg:kwargs Assign Call call:get If Call call:issubclass Assign Call call:partial Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_uniform", - "source_code": "def is_uniform(self): return self._uniform_row_length is not None", - "docstring": "Returns true if the partition is known to be uniform statically. This is based upon the existence of self._uniform_row_length. For example: RowPartition.from_row_lengths([3,3,3]).is_uniform()==false RowPartition.from_uniform_row_length(5, nvals=20).is_uniform()==true RowPartition.from_row_lengths([2,0,2]).is_uniform()==false Returns: Whether a RowPartition is known to be uniform statically.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", - "ast_data": "FunctionDef name:is_uniform arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_compatible_with", - "source_code": "def is_compatible_with(self, spec_or_value): if not isinstance(spec_or_value, TypeSpec): spec_or_value = type_spec_from_value(spec_or_value) if type(self) is not type(spec_or_value): return False return self.__is_compatible(self._serialize(), spec_or_value._serialize())", - "docstring": "Returns true if is compatible with this TypeSpec. Prefer using \"is_subtype_of\" and \"most_specific_common_supertype\" wherever possible. 
Args: spec_or_value: A TypeSpec or TypeSpec associated value to compare against.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", - "ast_data": "FunctionDef name:is_compatible_with arguments arg:self arg:spec_or_value If Assign Call call:type_spec_from_value If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "algorithms", - "name": "__mul__", - "source_code": "def __mul__(self, other: Union[int, float, Fraction, Monomial]): if isinstance(other, int) or isinstance(other, float) or isinstance(other, Fraction): result = Polynomial([]) monos = {m.clone() for m in self.all_monomials()} for m in monos: result = result.__add__(m.clone() * other) return result elif isinstance(other, Monomial): result = Polynomial([]) monos = {m.clone() for m in self.all_monomials()} for m in monos: result = result.__add__(m.clone() * other) return result elif isinstance(other, Polynomial): temp_self = {m.clone() for m in self.all_monomials()} temp_other = {m.clone() for m in other.all_monomials()} result = Polynomial([]) for i in temp_self: for j in temp_other: result = result.__add__(i * j) return result else: raise ValueError('Can only multiple int, float, Fraction, Monomials, or Polynomials with Polynomials.')", - "docstring": "Multiply a given polynomial to a copy of self.", - "type": "method", - "file_path": "algorithms\\algorithms\\maths\\polynomial.py", - "ast_data": "FunctionDef name:__mul__ arguments arg:self arg:other type:Union[int, float, Fraction, Monomial] If BoolOp Call call:isinstance Call call:isinstance Call call:isinstance Assign Call call:Polynomial Assign For Assign Call call:__add__ Return return:yes If Call call:isinstance Assign Call call:Polynomial Assign For Assign Call call:__add__ Return return:yes If Call call:isinstance Assign Assign Assign Call call:Polynomial For For Assign Call call:__add__ Return return:yes Raise raises:ValueError('Can only multiple int, float, Fraction, Monomials, or Polynomials with Polynomials.')" - }, - { - "library": "sphinx", - "name": "latex_visit_inheritance_diagram", - "source_code": "def latex_visit_inheritance_diagram(self: LaTeXTranslator, node: inheritance_diagram) -> None: graph = node['graph'] graph_hash = get_graph_hash(node) name = 'inheritance%s' % graph_hash dotcode = graph._generate_dot(name, config = self.config, graph_attrs = {'size': '\"6.0, 6.0\"'}) render_dot_latex(self, node, dotcode, {}, 'inheritance') raise nodes.SkipNode", - "docstring": "Output the graph for LaTeX. 
This will insert a PDF.", - "type": "function", - "file_path": "sphinx\\sphinx\\ext\\inheritance_diagram.py", - "ast_data": "FunctionDef name:latex_visit_inheritance_diagram arguments arg:self type:LaTeXTranslator arg:node type:inheritance_diagram Assign Assign Call call:get_graph_hash Assign Assign Call call:_generate_dot Raise raises:nodes.SkipNode" - }, - { - "library": "flexx", - "name": "add_extension", - "source_code": "def add_extension(self, extension_class): if not (isinstance(extension_class, type) and issubclass(extension_class, Extension)): raise TypeError('add_extension() expects a Extension class.') extension = extension_class() name = extension.name if not isinstance(name, str): raise TypeError('Extension name must be str.') if len(name) = = 0 or len(name) > 250: raise NameError('Extension names must be nonempty and shorter than 251 chars.') if name in self._extensions: logger.warning('BSDF warning: overwriting extension \"%s\", consider removing first' % name) cls = extension.cls if not cls: clss = [] elif isinstance(cls, (tuple, list)): clss = cls else: clss = [cls] for cls in clss: if not isinstance(cls, type): raise TypeError('Extension classes must be types.') for cls in clss: self._extensions_by_cls[cls] = (name, extension.encode) self._extensions[name] = extension return extension_class", - "docstring": "Add an extension to this serializer instance, which must be a subclass of Extension. Can be used as a decorator.", - "type": "method", - "file_path": "flexx\\flexx\\app\\bsdf_lite.py", - "ast_data": "FunctionDef name:add_extension arguments arg:self arg:extension_class If Raise raises:TypeError('add_extension() expects a Extension class.') Assign Call call:extension_class Assign If Raise raises:TypeError('Extension name must be str.') If BoolOp Compare op:Eq Compare op:Gt Raise raises:NameError('Extension names must be nonempty and shorter than 251 chars.') If Compare op:In Assign If Assign If Call call:isinstance Assign Assign For If Raise raises:TypeError('Extension classes must be types.') For Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "filter2d_separable", - "source_code": "def filter2d_separable(input: Tensor, kernel_x: Tensor, kernel_y: Tensor, border_type: str = 'reflect', normalized: bool = False, padding: str = 'same') -> Tensor: out_x = filter2d(input, kernel_x[..., None, :], border_type, normalized, padding) out = filter2d(out_x, kernel_y[..., None], border_type, normalized, padding) return out", - "docstring": "Convolve a tensor with two 1d kernels, in x and y directions. The function applies a given kernel to a tensor. The kernel is applied independently at each depth channel of the tensor. Before applying the kernel, the function applies padding according to the specified mode so that the output remains in the same shape. Args: input: the input tensor with shape of :math:. kernel_x: the kernel to be convolved with the input tensor. The kernel shape must be :math: or :math:. kernel_y: the kernel to be convolved with the input tensor. The kernel shape must be :math: or :math:. border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)`. Example: >>> input = torch.tensor([[[ ... [0., 0., 0., 0., 0.], ... [0., 0., 0., 0., 0.], ... [0., 0., 5., 0., 0.], ... [0., 0., 0., 0., 0.], ... 
[0., 0., 0., 0., 0.],]]]) >>> kernel = torch.ones(1, 3) >>> filter2d_separable(input, kernel, kernel, padding='same') tensor([[[[0., 0., 0., 0., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 5., 5., 5., 0.], [0., 0., 0., 0., 0.]]]])", - "type": "function", - "file_path": "kornia\\kornia\\filters\\filter.py", - "ast_data": "FunctionDef name:filter2d_separable arguments arg:input type:Tensor arg:kernel_x type:Tensor arg:kernel_y type:Tensor arg:border_type type:str arg:normalized type:bool arg:padding type:str Assign Call call:filter2d Assign Call call:filter2d Return return:yes" - }, - { - "library": "matplotlib", - "name": "ScaledTranslation", - "source_code": "class ScaledTranslation(Affine2DBase): def __init__(self, xt, yt, scale_trans, **kwargs): super().__init__(**kwargs) self._t = (xt, yt) self._scale_trans = scale_trans self.set_children(scale_trans) self._mtx = None self._inverted = None __str__ = _make_str_method('_t') def get_matrix(self): if self._invalid: self._mtx = IdentityTransform._mtx.copy() self._mtx[: 2, 2] = self._scale_trans.transform(self._t) self._invalid = 0 self._inverted = None return self._mtx", - "docstring": "A transformation that translates by *xt* and *yt*, after *xt* and *yt* have been transformed by *scale_trans*.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "ClassDef name:ScaledTranslation FunctionDef name:__init__ arguments arg:self arg:xt arg:yt arg:scale_trans kwarg:kwargs Assign Assign Assign Assign Assign Call call:_make_str_method FunctionDef name:get_matrix arguments arg:self If Assign Call call:copy Assign Call call:transform Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "lazystr", - "source_code": "def lazystr(text): return lazy(str, str)(text)", - "docstring": "Shortcut for the common case of a lazy callable that returns str.", - "type": "function", - "file_path": "django\\django\\utils\\functional.py", - "ast_data": "FunctionDef name:lazystr arguments arg:text Return return:yes" - }, - { - "library": "scipy", - "name": "__init__", - "source_code": "@docfiller def __init__(self, file_stream, do_compression = False, unicode_strings = False, global_vars = None, long_field_names = False, oned_as = 'row'): self.file_stream = file_stream self.do_compression = do_compression self.unicode_strings = unicode_strings if global_vars: self.global_vars = global_vars else: self.global_vars = [] self.long_field_names = long_field_names self.oned_as = oned_as self._matrix_writer = None", - "docstring": "Initialize writer for matlab 5 format files Parameters ---------- %(do_compression)s %(unicode_strings)s global_vars : None or sequence of strings, optional Names of variables to be marked as global for matlab %(long_fields)s %(oned_as)s", - "type": "method", - "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:file_stream arg:do_compression arg:unicode_strings arg:global_vars arg:long_field_names arg:oned_as Assign Assign Assign If Assign Assign Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, on_ui_exit = None, config = None): self._on_ui_exit = on_ui_exit self._command_handler_registry = debugger_cli_common.CommandHandlerRegistry() self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry() self._tab_completion_registry.register_tab_comp_context([''], self.CLI_EXIT_COMMANDS + 
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] + debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES) self._config = config or cli_config.CLIConfig() self._config_argparser = argparse.ArgumentParser(description = 'config command', usage = argparse.SUPPRESS) subparsers = self._config_argparser.add_subparsers() set_parser = subparsers.add_parser('set') set_parser.add_argument('property_name', type = str) set_parser.add_argument('property_value', type = str) set_parser = subparsers.add_parser('show') self.register_command_handler('config', self._config_command_handler, self._config_argparser.format_help(), prefix_aliases = ['cfg'])", - "docstring": "Constructor of the base class. Args: on_ui_exit: () the callback to be called when the UI exits. config: An instance of carrying user-facing configurations.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\base_ui.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:on_ui_exit arg:config Assign Assign Call call:CommandHandlerRegistry Assign Call call:TabCompletionRegistry Assign BoolOp Call call:CLIConfig Assign Call call:ArgumentParser Assign Call call:add_subparsers Assign Call call:add_parser Assign Call call:add_parser" - }, - { - "library": "scipy", - "name": "rvs", - "source_code": "def rvs(self, mu = None, kappa = 1, size = 1, random_state = None): dim, mu, kappa = self._process_parameters(mu, kappa) random_state = self._get_random_state(random_state) samples = self._rvs(dim, mu, kappa, size, random_state) return samples", - "docstring": "Draw random samples from a von Mises-Fisher distribution. Parameters ---------- mu : array_like Mean direction of the distribution. Must be a one-dimensional unit vector of norm 1. kappa : float Concentration parameter. Must be positive. size : int or tuple of ints, optional Given a shape of, for example, (m,n,k), m*n*k samples are generated, and packed in an m-by-n-by-k arrangement. Because each sample is N-dimensional, the output shape is (m,n,k,N). If no shape is specified, a single (N-D) sample is returned. random_state : {None, int, np.random.RandomState, np.random.Generator}, optional Used for drawing random variates. If is , the singleton is used. If is an int, a new `seedNonesizeNN` is the dimension of the distribution.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:rvs arguments arg:self arg:mu arg:kappa arg:size arg:random_state Assign Call call:_process_parameters Assign Call call:_get_random_state Assign Call call:_rvs Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, performed_action, run_metadata = None, client_graph_def = None, tf_error = None): _check_type(performed_action, str) self.performed_action = performed_action if run_metadata is not None: _check_type(run_metadata, config_pb2.RunMetadata) self.run_metadata = run_metadata self.client_graph_def = client_graph_def self.tf_error = tf_error", - "docstring": "Constructor for . Args: performed_action: () Actually-performed action by the debug-wrapper session. run_metadata: run_metadata output from the run() call (if any). client_graph_def: (GraphDef) GraphDef from the client side, i.e., from the python front end of TensorFlow. Can be obtained with session.graph.as_graph_def(). 
tf_error: (errors.OpError subtypes) TensorFlow OpError that occurred during the run (if any).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\framework.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:performed_action arg:run_metadata arg:client_graph_def arg:tf_error Assign If Compare op:IsNot Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "allocator", - "source_code": "@property def allocator(self) -> str: return self._allocator", - "docstring": "Name of the allocator used to create this tensor (string).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", - "ast_data": "FunctionDef name:allocator arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "trans_z", - "source_code": "@classmethod def trans_z(cls, z: Tensor) -> Se3: zs = zeros_like(z) return cls.trans(zs, zs, z)", - "docstring": "Construct a z-axis translation. Args: z: the z-axis translation.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", - "ast_data": "FunctionDef name:trans_z arguments arg:cls arg:z type:Tensor Assign Call call:zeros_like Return return:yes" - }, - { - "library": "django", - "name": "has_perm", - "source_code": "def has_perm(self, perm, obj = None): if self.is_active and self.is_superuser: return True return _user_has_perm(self, perm, obj)", - "docstring": "Return True if the user has the specified permission. Query all available auth backends, but return immediately if any backend returns True. Thus, a user who has permission from a single auth backend is assumed to have permission in general. If an object is provided, check permissions for that object.", - "type": "method", - "file_path": "django\\django\\contrib\\auth\\models.py", - "ast_data": "FunctionDef name:has_perm arguments arg:self arg:perm arg:obj If BoolOp Return return:yes Return return:yes" - }, - { - "library": "salmon", - "name": "QueueReceiver", - "source_code": "class QueueReceiver: def __init__(self, queue_dir, sleep = 10, size_limit = 0, oversize_dir = None, workers = 10): self.queue = queue.Queue(queue_dir, pop_limit = size_limit, oversize_dir = oversize_dir) self.sleep = sleep self.workers = Pool(workers) def start(self, one_shot = False): logging.info('Queue receiver started on queue dir %s', self.queue.dir) logging.debug('Sleeping for %d seconds...', self.sleep) while not (len(self.queue) = = 0 and one_shot): if len(self.queue) = = 0: time.sleep(self.sleep) continue try: key, msg = self.queue.pop() except KeyError: logging.debug('Could not find message in Queue') continue logging.debug('Pulled message with key: %r off', key) self.workers.apply_async(self.process_message, args = (msg,)) self.workers.close() self.workers.join() def process_message(self, msg): try: logging.debug('Message received from Peer: %r, From: %r, to To %r.', msg.Peer, msg.From, msg.To) routing.Router.deliver(msg) except SMTPError as err: logging.exception('Raising SMTPError when running in a QueueReceiver is unsupported.') undeliverable_message(msg.Data, err.message) except Exception: logging.exception('Exception while processing message from Peer: %r, From: %r, to To %r.', msg.Peer, msg.From, msg.To) undeliverable_message(msg.Data, 'Router failed to catch exception.')", - "docstring": "Rather than listen on a socket this will watch a queue directory and process messages it receives from that. 
It works in almost the exact same way otherwise.", - "type": "class", - "file_path": "salmon\\salmon\\server.py", - "ast_data": "ClassDef name:QueueReceiver FunctionDef name:__init__ arguments arg:self arg:queue_dir arg:sleep arg:size_limit arg:oversize_dir arg:workers Assign Call call:Queue Assign Assign Call call:Pool FunctionDef name:start arguments arg:self arg:one_shot While If Compare op:Eq Try Assign Call call:pop ExceptHandler FunctionDef name:process_message arguments arg:self arg:msg Try ExceptHandler ExceptHandler" - }, - { - "library": "tensorflow", - "name": "can_decode", - "source_code": "def can_decode(self, value): if value.HasField('type_spec_value'): type_spec_class_enum = value.type_spec_value.type_spec_class return type_spec_class_enum = = struct_pb2.TypeSpecProto.EXTENSION_TYPE_SPEC return False", - "docstring": "Returns true if can be decoded into a .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\extension_type.py", - "ast_data": "FunctionDef name:can_decode arguments arg:self arg:value If Call call:HasField Assign Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "remove_unused_levels", - "source_code": "def remove_unused_levels(self) -> MultiIndex: new_levels = [] new_codes = [] changed = False for lev, level_codes in zip(self.levels, self.codes): uniques = np.where(np.bincount(level_codes + 1) > 0)[0] - 1 has_na = int(len(uniques) and uniques[0] = = -1) if len(uniques) ! = len(lev) + has_na: if lev.isna().any() and len(uniques) = = len(lev): break changed = True uniques = algos.unique(level_codes) if has_na: na_idx = np.where(uniques = = -1)[0] uniques[[0, na_idx[0]]] = uniques[[na_idx[0], 0]] code_mapping = np.zeros(len(lev) + has_na) code_mapping[uniques] = np.arange(len(uniques)) - has_na level_codes = code_mapping[level_codes] lev = lev.take(uniques[has_na:]) new_levels.append(lev) new_codes.append(level_codes) result = self.view() if changed: result._reset_identity() result._set_levels(new_levels, validate = False) result._set_codes(new_codes, validate = False) return result", - "docstring": "Create new MultiIndex from current that removes unused levels. Unused level(s) means levels that are not expressed in the labels. The resulting MultiIndex will have the same outward appearance, meaning the same .values and ordering. It will also be .equals() to the original. The method is useful in cases where you have a MultiIndex with hierarchical levels, but some of these levels are no longer needed due to filtering or subsetting operations. By removing the unused levels, the resulting MultiIndex becomes more compact and efficient, which can improve performance in subsequent operations. Returns ------- MultiIndex A new MultiIndex with unused levels removed. See Also -------- MultiIndex.droplevel : Remove specified levels from a MultiIndex. MultiIndex.reorder_levels : Rearrange levels of a MultiIndex. MultiIndex.set_levels : Set new levels on a MultiIndex. 
Examples -------- >>> mi = pd.MultiIndex.from_product([range(2), list(\"ab\")]) >>> mi MultiIndex([(0, 'a'), (0, 'b'), (1, 'a'), (1, 'b')], ) >>> mi[2:] MultiIndex([(1, 'a'), (1, 'b')], ) The 0 from the first level is not represented and can be removed >>> mi2 = mi[2:].remove_unused_levels() >>> mi2.levels FrozenList([[1], ['a', 'b']])", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\multi.py", - "ast_data": "FunctionDef name:remove_unused_levels arguments arg:self Assign Assign Assign For Call call:zip Assign Assign Call call:int If Compare op:NotEq If BoolOp Call call:any Compare op:Eq Assign Assign Call call:unique If Assign Assign Assign Call call:zeros Assign Assign Assign Call call:take Assign Call call:view If Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_weights", - "source_code": "def set_weights(self, weights): params = self.weights expected_num_weights = 0 for param in params: if isinstance(param, base_layer_utils.TrackableWeightHandler): expected_num_weights + = param.num_tensors else: expected_num_weights + = 1 if expected_num_weights ! = len(weights): raise ValueError('You called `set_weights(weights)` on layer \"%s\" with a weight list of length %s, but the layer was expecting %s weights. Provided weights: %s...' % (self.name, len(weights), expected_num_weights, str(weights)[: 50])) weight_index = 0 weight_value_tuples = [] for param in params: if isinstance(param, base_layer_utils.TrackableWeightHandler): num_tensors = param.num_tensors tensors = weights[weight_index: weight_index + num_tensors] param.set_weights(tensors) weight_index + = num_tensors else: weight = weights[weight_index] weight_shape = weight.shape if hasattr(weight, 'shape') else () ref_shape = param.shape if not ref_shape.is_compatible_with(weight_shape): raise ValueError('Layer weight shape %s not compatible with provided weight shape %s' % (ref_shape, weight_shape)) weight_value_tuples.append((param, weight)) weight_index + = 1 backend.batch_set_value(weight_value_tuples)", - "docstring": "Sets the weights of the layer, from Numpy arrays. The weights of a layer represent the state of the layer. This function sets the weight values from numpy arrays. The weight values should be passed in the order they are created by the layer. Note that the layer's weights must be instantiated before calling this function by calling the layer. For example, a Dense layer returns a list of two values-- per-output weights and the bias value. These can be used to set the weights of another Dense layer: >>> a = tf.keras.layers.Dense(1, ... kernel_initializer=tf.constant_initializer(1.)) >>> a_out = a(tf.convert_to_tensor([[1., 2., 3.]])) >>> a.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] >>> b = tf.keras.layers.Dense(1, ... kernel_initializer=tf.constant_initializer(2.)) >>> b_out = b(tf.convert_to_tensor([[10., 20., 30.]])) >>> b.get_weights() [array([[2.], [2.], [2.]], dtype=float32), array([0.], dtype=float32)] >>> b.set_weights(a.get_weights()) >>> b.get_weights() [array([[1.], [1.], [1.]], dtype=float32), array([0.], dtype=float32)] Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the layer (i.e. it should match the output of ). 
Raises: ValueError: If the provided weights list does not match the layer's specifications.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_v1.py", - "ast_data": "FunctionDef name:set_weights arguments arg:self arg:weights Assign Assign For If Call call:isinstance If Compare op:NotEq Raise raises:ValueError('You called `set_weights(weights)` on layer \"%s\" with a weight list of length %s, but the layer was expecting %s weights. Provided weights: %s...' % (self.name, len(weights), expected_num_weights, str(weights)[:50])) Assign Assign For If Call call:isinstance Assign Assign Assign Assign Assign If Raise raises:ValueError('Layer weight shape %s not compatible with provided weight shape %s' % (ref_shape, weight_shape))" - }, - { - "library": "tensorflow", - "name": "with_flat_values", - "source_code": "def with_flat_values(self, new_values): if isinstance(self._values, RaggedTensor): return self.with_values(self.values.with_flat_values(new_values)) else: new_values = _convert_to_ragged_tensor_values(new_values) return self.with_values(new_values)", - "docstring": "Returns a copy of with replaced by . Preserves cached row-partitioning tensors such as and if they have values. Args: new_values: Potentially ragged tensor that should replace . Must have , and must have the same number of rows as . Returns: A . . .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor.py", - "ast_data": "FunctionDef name:with_flat_values arguments arg:self arg:new_values If Call call:isinstance Return return:yes Assign Call call:_convert_to_ragged_tensor_values Return return:yes" - }, - { - "library": "django", - "name": "with_perm", - "source_code": "def with_perm(self, perm, is_active = True, include_superusers = True, obj = None): if isinstance(perm, str): try: app_label, codename = perm.split('.') except ValueError: raise ValueError('Permission name should be in the form app_label.permission_codename.') elif not isinstance(perm, Permission): raise TypeError('The `perm` argument must be a string or a permission instance.') if obj is not None: return UserModel._default_manager.none() permission_q = Q(group__user = OuterRef('pk')) | Q(user = OuterRef('pk')) if isinstance(perm, Permission): permission_q & = Q(pk = perm.pk) else: permission_q & = Q(codename = codename, content_type__app_label = app_label) user_q = Exists(Permission.objects.filter(permission_q)) if include_superusers: user_q | = Q(is_superuser = True) if is_active is not None: user_q & = Q(is_active = is_active) return UserModel._default_manager.filter(user_q)", - "docstring": "Return users that have permission \"perm\". 
By default, filter out inactive users and include superusers.", - "type": "method", - "file_path": "django\\django\\contrib\\auth\\backends.py", - "ast_data": "FunctionDef name:with_perm arguments arg:self arg:perm arg:is_active arg:include_superusers arg:obj If Call call:isinstance Try Assign Call call:split ExceptHandler Raise raises:ValueError('Permission name should be in the form app_label.permission_codename.') If Raise raises:TypeError('The `perm` argument must be a string or a permission instance.') If Compare op:IsNot Return return:yes Assign If Call call:isinstance Assign Call call:Exists If If Compare op:IsNot Return return:yes" - }, - { - "library": "pandas", - "name": "size", - "source_code": "@final @property def size(self) -> int: return int(np.prod(self.shape))", - "docstring": "Return an int representing the number of elements in this object. Return the number of rows if Series. Otherwise return the number of rows times number of columns if DataFrame. See Also -------- numpy.ndarray.size : Number of elements in the array. Examples -------- >>> s = pd.Series({\"a\": 1, \"b\": 2, \"c\": 3}) >>> s.size 3 >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df.size 4", - "type": "method", - "file_path": "pandas\\pandas\\core\\generic.py", - "ast_data": "FunctionDef name:size arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "watch_variable", - "source_code": "def watch_variable(self, v): if isinstance(v, resource_variable_ops.ResourceVariable) and v.handle in self._resource_tensor_inputs: return while self is not None and isinstance(self, FuncGraph): self._watched_variables.add(v) self = self.outer_graph", - "docstring": "Marks the variable v as accessed while building this graph.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", - "ast_data": "FunctionDef name:watch_variable arguments arg:self arg:v If BoolOp Call call:isinstance Compare op:In Return return:no While BoolOp Compare op:IsNot Call call:isinstance Assign" - }, - { - "library": "pytorch", - "name": "split_outside_bracket", - "source_code": "def split_outside_bracket(line: str, delimiter: str = ', ') -> list[str]: bracket_count = 0 curr_token = '' res = [] for char in line: if char = = '[': bracket_count + = 1 elif char = = ']': bracket_count - = 1 elif char = = delimiter and bracket_count = = 0: res.append(curr_token) curr_token = '' continue curr_token + = char res.append(curr_token) return res", - "docstring": "Given a line of text, split it on comma unless the comma is within a bracket '[]'.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\gen_pyi.py", - "ast_data": "FunctionDef name:split_outside_bracket arguments arg:line type:str arg:delimiter type:str Assign Assign Assign For If Compare op:Eq If Compare op:Eq If BoolOp Compare op:Eq Compare op:Eq Assign Return return:yes" - }, - { - "library": "scipy", - "name": "in_simplex", - "source_code": "def in_simplex(self, S, v_x, A_j0 = None): A_11 = np.delete(S, 0, 0) - S[0] sign_det_A_11 = np.sign(np.linalg.det(A_11)) if sign_det_A_11 = = 0: sign_det_A_11 = -1 if A_j0 is None: A_j0 = S - v_x for d in range(self.dim + 1): det_A_jj = (-1) ** d * sign_det_A_11 sign_det_A_j0 = np.sign(np.linalg.det(np.delete(A_j0, d, 0))) if det_A_jj = = sign_det_A_j0: continue else: return False return True", - "docstring": "Check if a vector v_x is in simplex . 
Parameters ---------- S : array_like Array containing simplex entries of vertices as rows v_x : A candidate vertex A_j0 : array, optional, Allows for A_j0 to be pre-calculated Returns ------- res : boolean True if is in", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_shgo_lib\\_complex.py", - "ast_data": "FunctionDef name:in_simplex arguments arg:self arg:S arg:v_x arg:A_j0 Assign Assign Call call:sign If Compare op:Eq Assign If Compare op:Is Assign For Call call:range Assign Assign Call call:sign If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "checkpoint_id", - "source_code": "@property def checkpoint_id(self) -> Union[str, os.PathLike]: return self.path", - "docstring": "return the checkpoint_id that will be used to load the checkpoint.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\filesystem.py", - "ast_data": "FunctionDef name:checkpoint_id arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "instance_norm", - "source_code": "def instance_norm(input: Tensor, running_mean: Optional[Tensor] = None, running_var: Optional[Tensor] = None, weight: Optional[Tensor] = None, bias: Optional[Tensor] = None, use_input_stats: bool = True, momentum: float = 0.1, eps: float = 1e-05) -> Tensor: if has_torch_function_variadic(input, running_mean, running_var, weight, bias): return handle_torch_function(instance_norm, (input, running_mean, running_var, weight, bias), input, running_mean = running_mean, running_var = running_var, weight = weight, bias = bias, use_input_stats = use_input_stats, momentum = momentum, eps = eps) if use_input_stats: _verify_spatial_size(input.size()) return torch.instance_norm(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, torch.backends.cudnn.enabled)", - "docstring": "Apply Instance Normalization independently for each channel in every data sample within a batch. 
See :class:, :class:, :class: for details.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:instance_norm arguments arg:input type:Tensor arg:running_mean type:Optional[Tensor] arg:running_var type:Optional[Tensor] arg:weight type:Optional[Tensor] arg:bias type:Optional[Tensor] arg:use_input_stats type:bool arg:momentum type:float arg:eps type:float If Call call:has_torch_function_variadic Return return:yes If Return return:yes" - }, - { - "library": "tensorflow", - "name": "call_function", - "source_code": "def call_function(self, name, tensor_inputs, num_outputs): attrs = tuple(itertools.chain(*self.function_call_options.as_attrs().items())) cancellation_context = cancellation.context() if cancellation_context is None: outputs = execute.execute(name.decode('utf-8'), num_outputs = num_outputs, inputs = tensor_inputs, attrs = attrs, ctx = self) else: outputs = execute.execute_with_cancellation(name.decode('utf-8'), num_outputs = num_outputs, inputs = tensor_inputs, attrs = attrs, ctx = self, cancellation_manager = cancellation_context) outputs = outputs or None return outputs", - "docstring": "Calls the function associated with the given name.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:call_function arguments arg:self arg:name arg:tensor_inputs arg:num_outputs Assign Call call:tuple Assign Call call:context If Compare op:Is Assign Call call:execute Assign Call call:execute_with_cancellation Assign BoolOp Return return:yes" - }, - { - "library": "tensorflow", - "name": "Graph", - "source_code": "class Graph(collections.namedtuple('Graph', ['entry', 'exit', 'error', 'index', 'stmt_prev', 'stmt_next'])): def __repr__(self): return self.as_dot() def as_dot(self): result = 'digraph CFG {\\n' for node in self.index.values(): result + = ' %s [label = \"%s\"];\\n' % (id(node), node) for node in self.index.values(): for next_ in node.next: result + = ' %s -> %s;\\n' % (id(node), id(next_)) result + = '}' return result", - "docstring": "A Control Flow Graph. The CFG maintains an index to allow looking up a CFG node by the AST node to which it is associated. The index can also be enumerated in top-down, depth first order. Walking the graph in forward or reverse order is supported by double parent-child links. Note: the error nodes are not wired to their corresponding finally guards, because these are shared, and wiring them would create a reverse path from normal control flow into the error nodes, which we want to avoid. The graph also maintains edges corresponding to higher level statements like for-else loops. A node is considered successor of a statement if there is an edge from a node that is lexically a child of that statement to a node that is not. Statement predecessors are analogously defined. 
Attributes: entry: Node, the entry node exit: FrozenSet[Node, ...], the exit nodes error: FrozenSet[Node, ...], nodes that exit due to an explicitly raised error (errors propagated from function calls are not accounted) index: Dict[ast.Node, Node], mapping AST nodes to the respective CFG node stmt_prev: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their predecessor CFG nodes stmt_next: Dict[ast.Node, FrozenSet[Node, ...]], mapping statement AST nodes to their successor CFG nodes", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\cfg.py", - "ast_data": "ClassDef name:Graph Call call:namedtuple FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:as_dot arguments arg:self Assign For Call call:values For Call call:values For Return return:yes" - }, - { - "library": "algorithms", - "name": "__init__", - "source_code": "def __init__(self, size): self.queue = deque(maxlen = size)", - "docstring": "Initialize your data structure here. :type size: int", - "type": "method", - "file_path": "algorithms\\algorithms\\queues\\moving_average.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:size Assign Call call:deque" - }, - { - "library": "scipy", - "name": "rfft2", - "source_code": "@_dispatch def rfft2(x, s = None, axes = (-2, -1), norm = None, overwrite_x = False, workers = None, *, plan = None): return (Dispatchable(x, np.ndarray),)", - "docstring": "Compute the 2-D FFT of a real array. Parameters ---------- x : array Input array, taken to be real. s : sequence of ints, optional Shape of the FFT. axes : sequence of ints, optional Axes over which to compute the FFT. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". overwrite_x : bool, optional If True, the contents of can be destroyed; the default is False. See :func: for more details. workers : int, optional Maximum number of workers to use for parallel computation. If negative, the value wraps around from `~scipy.fft.fftrfftnrfftn`. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = np.broadcast_to([1, 0, -1, 0], (4, 4)) >>> scipy.fft.rfft2(x) array([[0.+0.j, 8.+0.j, 0.+0.j], [0.+0.j, 0.+0.j, 0.+0.j], [0.+0.j, 0.+0.j, 0.+0.j], [0.+0.j, 0.+0.j, 0.+0.j]])", - "type": "function", - "file_path": "scipy\\scipy\\fft\\_basic.py", - "ast_data": "FunctionDef name:rfft2 arguments arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers Return return:yes" - }, - { - "library": "scikit-learn", - "name": "split", - "source_code": "def split(self, X, y = None, groups = None): return super().split(X, y, groups)", - "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. 
test : ndarray The testing set indices for that split.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", - "ast_data": "FunctionDef name:split arguments arg:self arg:X arg:y arg:groups Return return:yes" - }, - { - "library": "numpy", - "name": "StructuredVoidFormat", - "source_code": "class StructuredVoidFormat: def __init__(self, format_functions): self.format_functions = format_functions @classmethod def from_data(cls, data, **options): format_functions = [] for field_name in data.dtype.names: format_function = _get_format_function(data[field_name], **options) if data.dtype[field_name].shape ! = (): format_function = SubArrayFormat(format_function, **options) format_functions.append(format_function) return cls(format_functions) def __call__(self, x): str_fields = [format_function(field) for field, format_function in zip(x, self.format_functions)] if len(str_fields) = = 1: return f'({str_fields[0]},)' else: return f'({', '.join(str_fields)})'", - "docstring": "Formatter for structured np.void objects. This does not work on structured alias types like np.dtype(('i4', 'i2,i2')), as alias scalars lose their field information, and the implementation relies upon np.void.__getitem__.", - "type": "class", - "file_path": "numpy\\numpy\\_core\\arrayprint.py", - "ast_data": "ClassDef name:StructuredVoidFormat FunctionDef name:__init__ arguments arg:self arg:format_functions Assign FunctionDef name:from_data arguments arg:cls arg:data kwarg:options Assign For Assign Call call:_get_format_function If Compare op:NotEq Assign Call call:SubArrayFormat Return return:yes FunctionDef name:__call__ arguments arg:self arg:x Assign If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "scrapy", - "name": "TopLevelFormatter", - "source_code": "class TopLevelFormatter(logging.Filter): def __init__(self, loggers: list[str] | None = None): super().__init__() self.loggers: list[str] = loggers or [] def filter(self, record: logging.LogRecord) -> bool: if any((record.name.startswith(logger + '.') for logger in self.loggers)): record.name = record.name.split('.', 1)[0] return True", - "docstring": "Keep only top level loggers' name (direct children from root) from records. This filter will replace Scrapy loggers' names with 'scrapy'. This mimics the old Scrapy log behaviour and helps shortening long names. Since it can't be set for just one logger (it won't propagate for its children), it's going to be set in the root handler, with a parametrized `` list where it should act.", - "type": "class", - "file_path": "scrapy\\scrapy\\utils\\log.py", - "ast_data": "ClassDef name:TopLevelFormatter FunctionDef name:__init__ arguments arg:self arg:loggers type:list[str] | None FunctionDef name:filter arguments arg:self arg:record type:logging.LogRecord If Call call:any Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "broadcast_tensors", - "source_code": "def broadcast_tensors(*tensors): if has_torch_function(tensors): return handle_torch_function(broadcast_tensors, tensors, *tensors) return _VF.broadcast_tensors(tensors)", - "docstring": "broadcast_tensors(*tensors) -> List of Tensors Broadcasts the given tensors according to :ref:. Args: *tensors: any number of tensors of the same type .. warning:: More than one element of a broadcasted tensor may refer to a single memory location. As a result, in-place operations (especially ones that are vectorized) may result in incorrect behavior. 
If you need to write to the tensors, please clone them first. Example:: >>> x = torch.arange(3).view(1, 3) >>> y = torch.arange(2).view(2, 1) >>> a, b = torch.broadcast_tensors(x, y) >>> a.size() torch.Size([2, 3]) >>> a tensor([[0, 1, 2], [0, 1, 2]])", - "type": "function", - "file_path": "pytorch\\torch\\functional.py", - "ast_data": "FunctionDef name:broadcast_tensors arguments vararg:tensors If Call call:has_torch_function Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "write", - "source_code": "def write(self, arr, name): if scipy.sparse.issparse(arr): self.write_sparse(arr, name) return arr = np.asarray(arr) dt = arr.dtype if not dt.isnative: arr = arr.astype(dt.newbyteorder(' = ')) dtt = dt.type if dtt is np.object_: raise TypeError('Cannot save object arrays in Mat4') elif dtt is np.void: raise TypeError('Cannot save void type arrays') elif dtt in (np.str_, np.bytes_): self.write_char(arr, name) return self.write_numeric(arr, name)", - "docstring": "Write matrix , with name Parameters ---------- arr : array_like array to write name : str name in matlab workspace", - "type": "method", - "file_path": "scipy\\scipy\\io\\matlab\\_mio4.py", - "ast_data": "FunctionDef name:write arguments arg:self arg:arr arg:name If Call call:issparse Return return:no Assign Call call:asarray Assign If Assign Call call:astype Assign If Compare op:Is Raise raises:TypeError('Cannot save object arrays in Mat4') If Compare op:Is Raise raises:TypeError('Cannot save void type arrays') If Compare op:In Return return:no" - }, - { - "library": "pandas", - "name": "buffer_to_ndarray", - "source_code": "def buffer_to_ndarray(buffer: Buffer, dtype: tuple[DtypeKind, int, str, str], *, length: int, offset: int = 0) -> np.ndarray: kind, bit_width, _, _ = dtype column_dtype = _NP_DTYPES.get(kind, {}).get(bit_width, None) if column_dtype is None: raise NotImplementedError(f'Conversion for {dtype} is not yet supported.') ctypes_type = np.ctypeslib.as_ctypes_type(column_dtype) if bit_width = = 1: assert length is not None, '`length` must be specified for a bit-mask buffer.' pa = import_optional_dependency('pyarrow') arr = pa.BooleanArray.from_buffers(pa.bool_(), length, [None, pa.foreign_buffer(buffer.ptr, length)], offset = offset) return np.asarray(arr) else: data_pointer = ctypes.cast(buffer.ptr + offset * bit_width // 8, ctypes.POINTER(ctypes_type)) if length > 0: return np.ctypeslib.as_array(data_pointer, shape = (length,)) return np.array([], dtype = ctypes_type)", - "docstring": "Build a NumPy array from the passed buffer. Parameters ---------- buffer : Buffer Buffer to build a NumPy array from. dtype : tuple Data type of the buffer conforming protocol dtypes format. offset : int, default: 0 Number of elements to offset from the start of the buffer. length : int, optional If the buffer is a bit-mask, specifies a number of bits to read from the buffer. Has no effect otherwise. Returns ------- np.ndarray Notes ----- The returned array doesn't own the memory. 
The caller of this function is responsible for keeping the memory owner object alive as long as the returned NumPy array is being used.", - "type": "function", - "file_path": "pandas\\pandas\\core\\interchange\\from_dataframe.py", - "ast_data": "FunctionDef name:buffer_to_ndarray arguments arg:buffer type:Buffer arg:dtype type:tuple[DtypeKind, int, str, str] Assign Assign Call call:get If Compare op:Is Raise raises:NotImplementedError(f'Conversion for {dtype} is not yet supported.') Assign Call call:as_ctypes_type If Compare op:Eq Assign Call call:import_optional_dependency Assign Call call:from_buffers Return return:yes Assign Call call:cast If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "devices", - "source_code": "@property @deprecation.deprecated(None, 'Please avoid relying on devices property.') def devices(self): require_replica_context(self) return (device_util.current(),)", - "docstring": "Returns the devices this replica is to be executed on, as a tuple of strings. NOTE: For and , this returns a nested list of device strings, e.g., [[\"GPU:0\"]].", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", - "ast_data": "FunctionDef name:devices arguments arg:self Call call:deprecated Return return:yes" - }, - { - "library": "pytorch", - "name": "get_prologue_template_epilogue", - "source_code": "@staticmethod def get_prologue_template_epilogue(nodes: list[BaseSchedulerNode]) -> tuple[list[BaseSchedulerNode], BaseSchedulerNode, list[BaseSchedulerNode]]: template_index = next((i for i, n in enumerate(nodes) if n.is_template())) prologue = nodes[: template_index] template_node = nodes[template_index] epilogue = nodes[template_index + 1:] return (prologue, template_node, epilogue)", - "docstring": "For the list of nodes, get the prologue, template, and epilogue", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\scheduler.py", - "ast_data": "FunctionDef name:get_prologue_template_epilogue arguments arg:nodes type:list[BaseSchedulerNode] Assign Call call:next Assign Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "Denormalize", - "source_code": "class Denormalize(Module): def __init__(self, mean: Union[Tensor, float], std: Union[Tensor, float]) -> None: super().__init__() self.mean = mean self.std = std def forward(self, input: Tensor) -> Tensor: return denormalize(input, self.mean, self.std) def __repr__(self) -> str: repr = f'(mean = {self.mean}, std = {self.std})' return self.__class__.__name__ + repr", - "docstring": "Denormalize a tensor image with mean and standard deviation. .. math:: \\text{input[channel] = (input[channel] * std[channel]) + mean[channel]} Where is :math: and :math: for channels, Args: mean: Mean for each channel. std: Standard deviations for each channel. Shape: - Input: Image tensor of size :math:. - Output: Denormalised tensor with same size as input :math:. Examples: >>> x = torch.rand(1, 4, 3, 3) >>> out = Denormalize(0.0, 255.)(x) >>> out.shape torch.Size([1, 4, 3, 3]) >>> x = torch.rand(1, 4, 3, 3, 3) >>> mean = torch.zeros(1, 4) >>> std = 255. 
* torch.ones(1, 4) >>> out = Denormalize(mean, std)(x) >>> out.shape torch.Size([1, 4, 3, 3, 3])", - "type": "class", - "file_path": "kornia\\kornia\\enhance\\normalize.py", - "ast_data": "ClassDef name:Denormalize FunctionDef name:__init__ arguments arg:self arg:mean type:Union[Tensor, float] arg:std type:Union[Tensor, float] Assign Assign FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes FunctionDef name:__repr__ arguments arg:self Assign Return return:yes" - }, - { - "library": "pandas", - "name": "is_nested_object", - "source_code": "def is_nested_object(obj) -> bool: return bool(isinstance(obj, ABCSeries) and is_object_dtype(obj.dtype) and any((isinstance(v, ABCSeries) for v in obj._values)))", - "docstring": "return a boolean if we have a nested object, e.g. a Series with 1 or more Series elements This may not be necessarily be performant.", - "type": "function", - "file_path": "pandas\\pandas\\core\\dtypes\\cast.py", - "ast_data": "FunctionDef name:is_nested_object arguments arg:obj Return return:yes" - }, - { - "library": "pytorch", - "name": "hardtanh", - "source_code": "@register_decomposition(aten.hardtanh) @_inplace_wrapper @out_wrapper() @elementwise_unary_scalar_wrapper @elementwise_type_promotion_wrapper(type_promoting_args = 'a', type_promotion_kind = ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT) def hardtanh(a: TensorLikeType, min_val: NumberType = -1, max_val: NumberType = 1, inplace: bool = False) -> TensorLikeType: if inplace: raise NotImplementedError if utils.is_boolean_dtype(a.dtype): raise RuntimeError('Bool inputs not supported for hardtanh') if utils.is_integer_dtype(a.dtype): min_val = int(min_val) max_val = int(max_val) if not (a.dtype ! = torch.uint8 or (min_val > = 0 and max_val > = 0)): raise RuntimeError('Cannot do hardtanh on an unsigned type with negative limits') if min_val > max_val: raise ValueError('min_val cannot be greater than max_val') return torch.clamp(a, min_val, max_val)", - "docstring": "Reference implementation of torch.nn.functional.hardtanh", - "type": "function", - "file_path": "pytorch\\torch\\_refs\\nn\\functional\\__init__.py", - "ast_data": "FunctionDef name:hardtanh arguments arg:a type:TensorLikeType arg:min_val type:NumberType arg:max_val type:NumberType arg:inplace type:bool Call call:register_decomposition Call call:out_wrapper Call call:elementwise_type_promotion_wrapper If Raise raises:NotImplementedError If Call call:is_boolean_dtype Raise raises:RuntimeError('Bool inputs not supported for hardtanh') If Call call:is_integer_dtype Assign Call call:int Assign Call call:int If Raise raises:RuntimeError('Cannot do hardtanh on an unsigned type with negative limits') If Compare op:Gt Raise raises:ValueError('min_val cannot be greater than max_val') Return return:yes" - }, - { - "library": "pytorch", - "name": "upcast", - "source_code": "def upcast(func): @functools.wraps(func) def wrapped(tensor, *args, **kwds): target_dtype = _dtypes_impl.default_dtypes().complex_dtype if tensor.is_complex() else _dtypes_impl.default_dtypes().float_dtype tensor = _util.cast_if_needed(tensor, target_dtype) return func(tensor, *args, **kwds) return wrapped", - "docstring": "NumPy fft casts inputs to 64 bit and *returns 64-bit results*.", - "type": "function", - "file_path": "pytorch\\torch\\_numpy\\fft.py", - "ast_data": "FunctionDef name:upcast arguments arg:func FunctionDef name:wrapped arguments arg:tensor vararg:args kwarg:kwds Call call:wraps Assign Assign Call call:cast_if_needed Return return:yes Return return:yes" 
- }, - { - "library": "scipy", - "name": "newer", - "source_code": "def newer(dst, src): if not os.path.exists(dst): raise ValueError(f\"file '{os.path.abspath(dst)}' does not exist\") if not os.path.exists(src): return 1 mtime1 = os.stat(dst)[ST_MTIME] mtime2 = os.stat(src)[ST_MTIME] return mtime1 > mtime2", - "docstring": "Return true if 'dst' exists and is more recently modified than 'src', or if 'dst' exists and 'src' doesn't. Return false if both exist and 'dst' is the same age or younger than 'src'.", - "type": "function", - "file_path": "scipy\\scipy\\_build_utils\\_wrappers_common.py", - "ast_data": "FunctionDef name:newer arguments arg:dst arg:src If Raise raises:ValueError(f\"file '{os.path.abspath(dst)}' does not exist\") If Return return:yes Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "pad_batch", - "source_code": "def pad_batch(self, *dataset_batch_elements): def _pad(batch): padded_dict_batch = {} if isinstance(batch, dict): for key, value in batch.items(): padded_dict_batch[key] = _pad(value) return padded_dict_batch rank = len(batch.shape) assert rank > 0 missing_count = self.padded_batch_size - self.get_real_batch_size(batch) padding = backend.stack([[0, missing_count]] + [[0, 0]] * (rank - 1)) return array_ops.pad(batch, padding, 'constant') if len(dataset_batch_elements) = = 1: return _pad(dataset_batch_elements[0]) batch_elements = [] for batch_element in dataset_batch_elements: batch_elements.append(_pad(batch_element)) return tuple(batch_elements)", - "docstring": "Pads out the batch dimension of a tensor to the complete batch size.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py", - "ast_data": "FunctionDef name:pad_batch arguments arg:self vararg:dataset_batch_elements FunctionDef name:_pad arguments arg:batch Assign If Call call:isinstance For Call call:items Assign Call call:_pad Return return:yes Assign Call call:len Assign Assign Call call:stack Return return:yes If Compare op:Eq Return return:yes Assign For Return return:yes" - }, - { - "library": "tensorflow", - "name": "force_checkpoint_conversion", - "source_code": "def force_checkpoint_conversion(value = True): global _FORCE_CHECKPOINT_CONVERSION _FORCE_CHECKPOINT_CONVERSION = value", - "docstring": "Forces checkpoint to use the new implementation. The new checkpoint implementation is changing the saved metadata slightly, and therefore may break forward compatibility in newly saved checkpoints. This means: - Previous versions of TensorFlow may not be able to load new checkpoints. - Backwards compatibility is unchanged: Old checkpoints can still be loaded. TensorFlow guarantees 3 weeks of forward compatibility, so this flag will be removed in the future weeks, after which checkpoint conversion will happen by default. **What happens when this flag is enabled?** The checkpoint will be saved with different metadata, meaning that previous versions of TensorFlow (<=2.10) will not be able to load this checkpoint. 
Args: value: Boolean value, whether or not to force checkpoint conversion to the new implementation.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\saveable_compat.py", - "ast_data": "FunctionDef name:force_checkpoint_conversion arguments arg:value Assign" - }, - { - "library": "tensorflow", - "name": "update_regroup", - "source_code": "def update_regroup(extended, updates, group): if not group: regrouped = regroup(updates, values_lib.Mirrored) return nest.map_structure(extended._local_results, regrouped) def _make_grouped_mirrored(values): if len(values) = = 1: return values_lib.Mirrored(values) g = control_flow_ops.group(values) if not all((tensor_util.is_tf_type(v) for v in values)): return g with_dep = [] for v in values: with ops.device(v.device), ops.control_dependencies([g]): with_dep.append(array_ops.identity(v)) return values_lib.Mirrored(with_dep) return regroup(updates, _make_grouped_mirrored)", - "docstring": "Regroup for an update, with dependencies to ensure all updates execute.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_utils.py", - "ast_data": "FunctionDef name:update_regroup arguments arg:extended arg:updates arg:group If Assign Call call:regroup Return return:yes FunctionDef name:_make_grouped_mirrored arguments arg:values If Compare op:Eq Return return:yes Assign Call call:group If Return return:yes Assign For With Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_use_env", - "source_code": "def get_use_env(args) -> bool: if not hasattr(args, 'use_env'): return True return args.use_env", - "docstring": "Retrieve `` and will be deprecated in future releases.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\run.py", - "ast_data": "FunctionDef name:get_use_env arguments arg:args If Return return:yes Return return:yes" - }, - { - "library": "authlib", - "name": "get_jwks", - "source_code": "def get_jwks(self): raise NotImplementedError()", - "docstring": "Return the JWKs that will be used to check the JWT access token signature. Developers MUST re-implement this method. Typically the JWKs are statically stored in the resource server configuration, or dynamically downloaded and cached using :ref::: def get_jwks(self): if \"jwks\" in cache: return cache.get(\"jwks\") server_metadata = get_server_metadata(self.issuer) jwks_uri = server_metadata.get(\"jwks_uri\") cache[\"jwks\"] = requests.get(jwks_uri).json() return cache[\"jwks\"]", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token_validator.py", - "ast_data": "FunctionDef name:get_jwks arguments arg:self Raise raises:NotImplementedError()" - }, - { - "library": "algorithms", - "name": "encode_rle", - "source_code": "def encode_rle(input): if not input: return '' encoded_str = '' prev_ch = '' count = 1 for ch in input: if ch ! = prev_ch: if prev_ch: encoded_str + = str(count) + prev_ch count = 1 prev_ch = ch else: count + = 1 else: return encoded_str + (str(count) + prev_ch)", - "docstring": "Gets a stream of data and compresses it under a Run-Length Encoding. :param input: The data to be encoded. 
:return: The encoded string.", - "type": "function", - "file_path": "algorithms\\algorithms\\compression\\rle_compression.py", - "ast_data": "FunctionDef name:encode_rle arguments arg:input If Return return:yes Assign Assign Assign For If Compare op:NotEq If Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_zsort", - "source_code": "def set_zsort(self, zsort): self._zsortfunc = self._zsort_functions[zsort] self._sort_zpos = None self.stale = True", - "docstring": "Set the calculation method for the z-order. Parameters ---------- zsort : {'average', 'min', 'max'} The function applied on the z-coordinates of the vertices in the viewer's coordinate system, to determine the z-order.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", - "ast_data": "FunctionDef name:set_zsort arguments arg:self arg:zsort Assign Assign Assign" - }, - { - "library": "scikit-learn", - "name": "fit_predict", - "source_code": "def fit_predict(self, X, y = None, sample_weight = None): self.fit(X, sample_weight = sample_weight) return self.labels_", - "docstring": "Compute clusters from a data or distance matrix and predict labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or (n_samples, n_samples) Training instances to cluster, or distances between instances if `` is by itself a core sample; a sample with a negative weight may inhibit its eps-neighbor from being core. Note that weights are absolute, and default to 1. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels. Noisy samples are given the label -1.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\cluster\\_dbscan.py", - "ast_data": "FunctionDef name:fit_predict arguments arg:self arg:X arg:y arg:sample_weight Return return:yes" - }, - { - "library": "pytorch", - "name": "disallow_in_graph", - "source_code": "def disallow_in_graph(fn): return _disallow_in_graph_helper(throw_if_not_allowed = True)(fn)", - "docstring": "Customize which functions TorchDynamo will exclude in the generated graph and force a graph break on. :: torch._dynamo.disallow_in_graph(torch.sub) @torch._dynamo.optimize(...) def fn(a): x = torch.add(x, 1) x = torch.sub(x, 1) x = torch.add(x, 1) return x fn(...) Will break the graph on , and give two graphs each with a single op.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\decorators.py", - "ast_data": "FunctionDef name:disallow_in_graph arguments arg:fn Return return:yes" - }, - { - "library": "sphinx", - "name": "add_config_value", - "source_code": "def add_config_value(self, name: str, default: Any, rebuild: _ConfigRebuild, types: type | Collection[type] | ENUM = (), description: str = '') -> None: logger.debug('[app] adding config value: %r', (name, default, rebuild, types)) self.config.add(name = name, default = default, rebuild = rebuild, types = types, description = description)", - "docstring": "Register a configuration value. This is necessary for Sphinx to recognize new values and set default values accordingly. :param name: The name of the configuration value. It is recommended to be prefixed with the extension name (ex. ``) to a string. However, booleans are still accepted and converted internally. .. 
versionadded:: 7.4 The *description* parameter.", - "type": "method", - "file_path": "sphinx\\sphinx\\application.py", - "ast_data": "FunctionDef name:add_config_value arguments arg:self arg:name type:str arg:default type:Any arg:rebuild type:_ConfigRebuild arg:types type:type | Collection[type] | ENUM arg:description type:str" - }, - { - "library": "scipy", - "name": "sum", - "source_code": "def sum(input, labels = None, index = None): return sum_labels(input, labels, index)", - "docstring": "Calculate the sum of the values of the array. Notes ----- This is an alias for kept for backwards compatibility reasons, for new code please prefer . See the docstring for more details.", - "type": "function", - "file_path": "scipy\\scipy\\ndimage\\_measurements.py", - "ast_data": "FunctionDef name:sum arguments arg:input arg:labels arg:index Return return:yes" - }, - { - "library": "matplotlib", - "name": "connectors", - "source_code": "@property def connectors(self): if self._inset_ax is None: return if self._auto_update_bounds: self._rectangle.set_bounds(self._bounds_from_inset_ax()) self._update_connectors() return tuple(self._connectors)", - "docstring": "4-tuple of or None The four connector lines connecting to (lower_left, upper_left, lower_right upper_right) corners of *inset_ax*. Two lines are set with visibility to *False*, but the user can set the visibility to True if the automatic choice is not deemed correct.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\inset.py", - "ast_data": "FunctionDef name:connectors arguments arg:self If Compare op:Is Return return:no If Return return:yes" - }, - { - "library": "numpy", - "name": "check_libs", - "source_code": "def check_libs(self, lib_dirs, libs, opt_libs = []): exts = self.library_extensions() info = None for ext in exts: info = self._check_libs(lib_dirs, libs, opt_libs, [ext]) if info is not None: break if not info: log.info(' libraries %s not found in %s', ', '.join(libs), lib_dirs) return info", - "docstring": "If static or shared libraries are available then return their info dictionary. Checks for all libraries as shared libraries first, then static (or vice versa if self.search_static_first is True).", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\system_info.py", - "ast_data": "FunctionDef name:check_libs arguments arg:self arg:lib_dirs arg:libs arg:opt_libs Assign Call call:library_extensions Assign For Assign Call call:_check_libs If Compare op:IsNot If Return return:yes" - }, - { - "library": "tensorflow", - "name": "reparameterization_type", - "source_code": "@property def reparameterization_type(self): return self._reparameterization_type", - "docstring": "Describes how samples from the distribution are reparameterized. Currently this is one of the static instances or . 
Returns: An instance of .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", - "ast_data": "FunctionDef name:reparameterization_type arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_proj_type", - "source_code": "def set_proj_type(self, proj_type, focal_length = None): _api.check_in_list(['persp', 'ortho'], proj_type = proj_type) if proj_type = = 'persp': if focal_length is None: focal_length = 1 elif focal_length < = 0: raise ValueError(f'focal_length = {focal_length} must be greater than 0') self._focal_length = focal_length else: if focal_length not in (None, np.inf): raise ValueError(f'focal_length = {focal_length} must be None for proj_type = {proj_type}') self._focal_length = np.inf", - "docstring": "Set the projection type. Parameters ---------- proj_type : {'persp', 'ortho'} The projection type. focal_length : float, default: None For a projection type of 'persp', the focal length of the virtual camera. Must be > 0. If None, defaults to 1. The focal length can be computed from a desired Field Of View via the equation: focal_length = 1/tan(FOV/2)", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\axes3d.py", - "ast_data": "FunctionDef name:set_proj_type arguments arg:self arg:proj_type arg:focal_length If Compare op:Eq If Compare op:Is Assign If Compare op:LtE Raise raises:ValueError(f'focal_length = {focal_length} must be greater than 0') Assign If Compare op:NotIn Raise raises:ValueError(f'focal_length = {focal_length} must be None for proj_type = {proj_type}') Assign" - }, - { - "library": "django", - "name": "check_constraints", - "source_code": "def check_constraints(self, table_names = None): pass", - "docstring": "Backends can override this method if they can apply constraint checking (e.g. via \"SET CONSTRAINTS ALL IMMEDIATE\"). 
Should raise an IntegrityError if any invalid foreign key references are encountered.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:check_constraints arguments arg:self arg:table_names" - }, - { - "library": "matplotlib", - "name": "get_images", - "source_code": "def get_images(self): return cbook.silent_list('AxesImage', self.images)", - "docstring": "Return a list of \\s contained by the Axes.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:get_images arguments arg:self Return return:yes" - }, - { - "library": "scrapy", - "name": "listen_tcp", - "source_code": "def listen_tcp(portrange: list[int], host: str, factory: ServerFactory) -> Port: from twisted.internet import reactor if len(portrange) > 2: raise ValueError(f'invalid portrange: {portrange}') if not portrange: return reactor.listenTCP(0, factory, interface = host) if len(portrange) = = 1: return reactor.listenTCP(portrange[0], factory, interface = host) for x in range(portrange[0], portrange[1] + 1): try: return reactor.listenTCP(x, factory, interface = host) except error.CannotListenError: if x = = portrange[1]: raise", - "docstring": "Like reactor.listenTCP but tries different ports in a range.", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\reactor.py", - "ast_data": "FunctionDef name:listen_tcp arguments arg:portrange type:list[int] arg:host type:str arg:factory type:ServerFactory If Compare op:Gt Raise raises:ValueError(f'invalid portrange: {portrange}') If Return return:yes If Compare op:Eq Return return:yes For Call call:range Try Return return:yes ExceptHandler If Compare op:Eq Raise" - }, - { - "library": "pytorch", - "name": "to_float", - "source_code": "def to_float(self): cls = type(self) conv = cls._FLOAT_CONV_MODULE(self.in_channels, self.out_channels, self.kernel_size, self.stride, self.padding, self.dilation, self.groups, self.bias is not None, self.padding_mode) conv.weight = torch.nn.Parameter(self.weight.detach()) if self.bias is not None: conv.bias = torch.nn.Parameter(self.bias.detach()) if issubclass(cls, _FusedModule): modules = [conv] assert hasattr(cls, '_FLOAT_RELU_MODULE') relu = cls._FLOAT_RELU_MODULE() modules.append(relu) fused = cls._FLOAT_MODULE(*modules) fused.train(self.training) return fused else: return conv", - "docstring": "This works for both single qat conv, and the qat conv - relu modules to convert the qat module to a floating point module", - "type": "method", - "file_path": "pytorch\\torch\\ao\\nn\\qat\\modules\\conv.py", - "ast_data": "FunctionDef name:to_float arguments arg:self Assign Call call:type Assign Call call:_FLOAT_CONV_MODULE Assign Call call:Parameter If Compare op:IsNot Assign Call call:Parameter If Call call:issubclass Assign Assign Call call:_FLOAT_RELU_MODULE Assign Call call:_FLOAT_MODULE Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "all_gather", - "source_code": "def all_gather(tensor, group = group.WORLD): return _AllGather.apply(group, tensor)", - "docstring": "Gathers tensors from the whole group in a list. Arguments: tensor (Tensor): Tensor to be broadcast from current process. group (ProcessGroup, optional): The process group to work on. 
Returns: tuple([Tensor]): Output of the collective.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\nn\\functional.py", - "ast_data": "FunctionDef name:all_gather arguments arg:tensor arg:group Return return:yes" - }, - { - "library": "tensorflow", - "name": "put", - "source_code": "def put(self, values, name = None): with ops.name_scope(name, '%s_put' % self._name, self._scope_vals(values)) as scope: if not isinstance(values, (list, tuple, dict)): values = [values] indices = list(range(len(values))) vals, _ = self._check_put_dtypes(values, indices) with ops.colocate_with(self._coloc_op): op = gen_data_flow_ops.stage(values = vals, shared_name = self._name, name = scope, capacity = self._capacity, memory_limit = self._memory_limit) return op", - "docstring": "Create an op that places a value into the staging area. This operation will block if the has reached its capacity. Args: values: A single tensor, a list or tuple of tensors, or a dictionary with tensor values. The number of elements must match the length of the list provided to the dtypes argument when creating the StagingArea. name: A name for the operation (optional). Returns: The created op. Raises: ValueError: If the number or type of inputs don't match the staging area.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\data_flow_ops.py", - "ast_data": "FunctionDef name:put arguments arg:self arg:values arg:name With If Assign Assign Call call:list Assign Call call:_check_put_dtypes With Assign Call call:stage Return return:yes" - }, - { - "library": "kornia", - "name": "EarlyStopping", - "source_code": "class EarlyStopping: def __init__(self, monitor: str, min_delta: float = 0.0, patience: int = 8, max_mode: bool = False) -> None: self.monitor = monitor self.min_delta = min_delta self.patience = patience self.max_mode = max_mode self.counter: int = 0 self.best_score: float = -inf if max_mode else inf self.early_stop: bool = False def __call__(self, model: Module, epoch: int, valid_metric: Dict[str, AverageMeter]) -> TrainerState: score: float = valid_metric[self.monitor].avg is_best: bool = score > self.best_score if self.max_mode else score < self.best_score if is_best: self.best_score = score self.counter = 0 else: is_within_delta: bool = score > self.best_score - self.min_delta if self.max_mode else score < self.best_score + self.min_delta if not is_within_delta: self.counter + = 1 if self.counter > = self.patience: self.early_stop = True if self.early_stop: print(f'[INFO] Early-Stopping the training process. Epoch: {epoch}.') return TrainerState.TERMINATE return TrainerState.TRAINING", - "docstring": "Callback that evaluates whether there is improvement in the loss function. The module track the losses and in case of finish patience sends a termination signal to the trainer. Args: monitor: the name of the value to track. min_delta: the minimum difference between losses to increase the patience counter. patience: the number of times to wait until the trainer does not terminate. max_mode: if true metric will be multiply by -1, turn this flag when increasing metric value is expected for example Accuracy **Usage example:** .. 
code:: python early_stop = EarlyStopping( monitor=\"loss\", patience=10 ) trainer = ImageClassifierTrainer( callbacks={\"on_epoch_end\", early_stop} )", - "type": "class", - "file_path": "kornia\\kornia\\x\\callbacks.py", - "ast_data": "ClassDef name:EarlyStopping FunctionDef name:__init__ arguments arg:self arg:monitor type:str arg:min_delta type:float arg:patience type:int arg:max_mode type:bool Assign Assign Assign Assign FunctionDef name:__call__ arguments arg:self arg:model type:Module arg:epoch type:int arg:valid_metric type:Dict[str, AverageMeter] If Assign Assign If If Compare op:GtE Assign If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "SharedObjectLoadingScope", - "source_code": "class SharedObjectLoadingScope(object): def __enter__(self): if _shared_object_disabled(): return NoopLoadingScope() global SHARED_OBJECT_LOADING SHARED_OBJECT_LOADING.scope = self self._obj_ids_to_obj = {} return self def get(self, object_id): if object_id is None: return return self._obj_ids_to_obj.get(object_id) def set(self, object_id, obj): if object_id is None: return self._obj_ids_to_obj[object_id] = obj def __exit__(self, *args, **kwargs): global SHARED_OBJECT_LOADING SHARED_OBJECT_LOADING.scope = NoopLoadingScope()", - "docstring": "A context manager for keeping track of loaded objects. During the deserialization process, we may come across objects that are shared across multiple layers. In order to accurately restore the network structure to its original state, allows us to re-use shared objects rather than cloning them.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py", - "ast_data": "ClassDef name:SharedObjectLoadingScope FunctionDef name:__enter__ arguments arg:self If Call call:_shared_object_disabled Return return:yes Assign Assign Return return:yes FunctionDef name:get arguments arg:self arg:object_id If Compare op:Is Return return:no Return return:yes FunctionDef name:set arguments arg:self arg:object_id arg:obj If Compare op:Is Return return:no Assign FunctionDef name:__exit__ arguments arg:self vararg:args kwarg:kwargs Assign Call call:NoopLoadingScope" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "def decision_function(self, X): check_is_fitted(self) negative_mahal_dist = self.score_samples(X) return negative_mahal_dist - self.offset_", - "docstring": "Compute the decision function of the given observations. Parameters ---------- X : array-like of shape (n_samples, n_features) The data matrix. Returns ------- decision : ndarray of shape (n_samples,) Decision function of the samples. It is equal to the shifted Mahalanobis distances. The threshold for being an outlier is 0, which ensures a compatibility with other outlier detection algorithms.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\covariance\\_elliptic_envelope.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Assign Call call:score_samples Return return:yes" - }, - { - "library": "tensorflow", - "name": "ExecutableLocation", - "source_code": "@tf_export('distribute.cluster_resolver.KubernetesExecutableLocation') class ExecutableLocation(enum.Enum): WITHIN_CLUSTER = 0 OFF_CLUSTER = 1", - "docstring": "Defines where the executable runs on. This is used to determine how to resolve the configuration to talk with the kube api server. means that the TensorFlow code you are running is running in a pod within the cluster itself. 
means any other enviroment outside the cluster.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\kubernetes_cluster_resolver.py", - "ast_data": "ClassDef name:ExecutableLocation Call call:tf_export Assign Assign" - }, - { - "library": "pytorch", - "name": "StrictMinMaxConstraint", - "source_code": "@dataclass(frozen = True) class StrictMinMaxConstraint(Constraint): vr: ValueRanges def render(self, source: Source) -> str: return f'{self.vr.lower} < = {source.name()} < = {self.vr.upper}'", - "docstring": "For clients: the size at this dimension must be within 'vr' (which specifies a lower and upper bound, inclusive-inclusive) AND it must be non-negative and should not be 0 or 1 (but see NB below). For backends: there must not be any guards on this dimension which are not implied by the given lower and upper bound. Regardless of the lower bound, the backend can assume the size is non-negative and that it is not 0 or 1. An unbounded StrictMinMaxConstraint can be thought of as a strict version of \"RelaxedUnspecConstraint\". NB: Export will often unsoundly assume that a graph works for 0/1, even though at trace time we assumed size is not 0 or 1. The idea is that if we produce a graph that works for a range of values, it will be OK for N=0/1 too.", - "type": "class", - "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", - "ast_data": "ClassDef name:StrictMinMaxConstraint Call call:dataclass FunctionDef name:render arguments arg:self arg:source type:Source Return return:yes" - }, - { - "library": "pytorch", - "name": "pad_sequence", - "source_code": "def pad_sequence(sequences: Union[Tensor, list[Tensor]], batch_first: bool = False, padding_value: float = 0.0, padding_side: str = 'right') -> Tensor: if not (torch.jit.is_tracing() or torch.jit.is_scripting()): if not isinstance(sequences, Iterable): msg = f'pad_sequence: Expected iterable for input sequences, but got arg of type: {type(sequences)}' raise RuntimeError(msg) sequences = tuple(sequences) elif isinstance(sequences, torch.Tensor): sequences = sequences.unbind(0) return torch._C._nn.pad_sequence(sequences, batch_first, padding_value, padding_side)", - "docstring": "Pad a list of variable length Tensors with :attr:. 
`sequencesLbatch_firstsequencesTbatch_first` otherwise", - "type": "function", - "file_path": "pytorch\\torch\\nn\\utils\\rnn.py", - "ast_data": "FunctionDef name:pad_sequence arguments arg:sequences type:Union[Tensor, list[Tensor]] arg:batch_first type:bool arg:padding_value type:float arg:padding_side type:str If If Assign Raise raises:RuntimeError(msg) Assign Call call:tuple If Call call:isinstance Assign Call call:unbind Return return:yes" - }, - { - "library": "numpy", - "name": "get_cblas_libs", - "source_code": "def get_cblas_libs(self, info): c = customized_ccompiler() tmpdir = tempfile.mkdtemp() s = textwrap.dedent(' #include \\n int main(int argc, const char *argv[])\\n {\\n double a[4] = {1, 2, 3, 4};\\n double b[4] = {5, 6, 7, 8};\\n return cblas_ddot(4, a, 1, b, 1) > 10;\\n }') src = os.path.join(tmpdir, 'source.c') try: with open(src, 'w') as f: f.write(s) try: obj = c.compile([src], output_dir = tmpdir, include_dirs = self.get_include_dirs()) except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError): return None for libs in [info['libraries'], ['cblas'] + info['libraries'], ['blas'] + info['libraries'], ['cblas'], ['blas']]: try: c.link_executable(obj, os.path.join(tmpdir, 'a.out'), libraries = libs, library_dirs = info['library_dirs'], extra_postargs = info.get('extra_link_args', [])) return libs except distutils.ccompiler.LinkError: pass finally: shutil.rmtree(tmpdir) return None", - "docstring": "Check whether we can link with CBLAS interface This method will search through several combinations of libraries to check whether CBLAS is present: 1. Libraries in `` Parameters ---------- info : dict system information dictionary for compilation and linking Returns ------- libraries : list of str or None a list of libraries that enables the use of CBLAS interface. Returns None if not found or a compilation error occurs. Since 1.17 returns a list.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\system_info.py", - "ast_data": "FunctionDef name:get_cblas_libs arguments arg:self arg:info Assign Call call:customized_ccompiler Assign Call call:mkdtemp Assign Call call:dedent Assign Call call:join Try With Try Assign Call call:compile ExceptHandler Return return:yes For Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "django", - "name": "get_migratable_models", - "source_code": "def get_migratable_models(self, app_config, db, include_auto_created = False): models = app_config.get_models(include_auto_created = include_auto_created) return [model for model in models if self.allow_migrate_model(db, model)]", - "docstring": "Return app models allowed to be migrated on provided db.", - "type": "method", - "file_path": "django\\django\\db\\utils.py", - "ast_data": "FunctionDef name:get_migratable_models arguments arg:self arg:app_config arg:db arg:include_auto_created Assign Call call:get_models Return return:yes" - }, - { - "library": "kornia", - "name": "save", - "source_code": "def save(self, image: Tensor, show_trajectories: bool = True, directory: Optional[str] = None) -> None: if directory is None: name = f'{self.name}_{datetime.datetime.now(tz = datetime.timezone.utc).strftime('%Y%m%d%H%M%S')!s}' directory = os.path.join('kornia_outputs', name) output = self.visualize(image, show_trajectories = show_trajectories) os.makedirs(directory, exist_ok = True) write_image(os.path.join(directory, f'{str(0).zfill(6)}.jpg'), output.byte()) logger.info(f'Outputs are saved in {directory}')", - "docstring": "Save the model to ONNX format. 
Args: image: The input image. show_trajectories: Whether to visualize trajectories. directory: Where to save the file(s).", - "type": "method", - "file_path": "kornia\\kornia\\models\\tracking\\boxmot_tracker.py", - "ast_data": "FunctionDef name:save arguments arg:self arg:image type:Tensor arg:show_trajectories type:bool arg:directory type:Optional[str] If Compare op:Is Assign Assign Call call:join Assign Call call:visualize" - }, - { - "library": "pygame", - "name": "render", - "source_code": "def render(self, text, antialias, color, background = None): if text is None: text = '' if isinstance(text, str) and self.__unull in text: raise ValueError('A null character was found in the text') if isinstance(text, bytes) and self.__bnull in text: raise ValueError('A null character was found in the text') save_antialiased = self.antialiased self.antialiased = bool(antialias) try: s, _ = super().render(text, color, background) return s finally: self.antialiased = save_antialiased", - "docstring": "render(text, antialias, color, background=None) -> Surface draw text on a new Surface", - "type": "method", - "file_path": "pygame\\src_py\\ftfont.py", - "ast_data": "FunctionDef name:render arguments arg:self arg:text arg:antialias arg:color arg:background If Compare op:Is Assign If BoolOp Call call:isinstance Compare op:In Raise raises:ValueError('A null character was found in the text') If BoolOp Call call:isinstance Compare op:In Raise raises:ValueError('A null character was found in the text') Assign Assign Call call:bool Try Assign Call call:render Return return:yes Assign" - }, - { - "library": "matplotlib", - "name": "polynomial_coefficients", - "source_code": "@property def polynomial_coefficients(self): n = self.degree if n > 10: warnings.warn('Polynomial coefficients formula unstable for high order Bezier curves!', RuntimeWarning) P = self.control_points j = np.arange(n + 1)[:, None] i = np.arange(n + 1)[None, :] prefactor = (-1) ** (i + j) * _comb(j, i) return _comb(n, j) * prefactor @ P", - "docstring": "The polynomial coefficients of the Bézier curve. .. warning:: Follows opposite convention from . Returns ------- (n+1, d) array Coefficients after expanding in polynomial basis, where :math: is the degree of the Bézier curve and :math: its dimension. These are the numbers (:math:) such that the curve can be written :math:. Notes ----- The coefficients are calculated as .. 
math:: {n \\choose j} \\sum_{i=0}^j (-1)^{i+j} {j \\choose i} P_i where :math: are the control points of the curve.", "type": "method", "file_path": "matplotlib\\lib\\matplotlib\\bezier.py", "ast_data": "FunctionDef name:polynomial_coefficients arguments arg:self Assign If Compare op:Gt Assign Assign Assign Assign Return return:yes" }, { "library": "tensorflow", "name": "make_sharded_variable_creator", "source_code": "def make_sharded_variable_creator(hosts: List[Text]) -> Callable[..., TPUEmbeddingVariable]: def sharded_variable_creator(next_creator: Callable[..., tf_variables.Variable], *args, **kwargs): kwargs['skip_mirrored_creator'] = True num_hosts = len(hosts) name, shape, dtype, unwrapped_initial_value = extract_variable_info(kwargs) initial_value = kwargs['initial_value'] rows = shape[0] cols = shape[1] partial_partition = rows % num_hosts full_rows_per_host = rows // num_hosts partitions = [full_rows_per_host + 1] * partial_partition + [full_rows_per_host] * (num_hosts - partial_partition) variables = [] sharding_aware = 'shard_info' in tf_inspect.getargspec(initial_value).args offset = 0 kwargs['dtype'] = dtype for i, p in enumerate(partitions): if p == 0: continue with ops.device(hosts[i]): kwargs['name'] = '{}_{}'.format(name, i) kwargs['shape'] = (p, cols) if sharding_aware: shard_info = base.ShardInfo(kwargs['shape'], (offset, 0)) kwargs['initial_value'] = functools.partial(initial_value, shard_info = shard_info) offset += p else: kwargs['initial_value'] = functools.partial(unwrapped_initial_value, kwargs['shape'], dtype = dtype) variables.append(next_creator(*args, **kwargs)) return TPUEmbeddingVariable(variables, name = name) return sharded_variable_creator", "docstring": "Makes a sharded variable creator given a list of hosts. Args: hosts: a list of tensorflow devices on which to shard the tensors.
Returns: A variable creator function.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v2.py", - "ast_data": "FunctionDef name:make_sharded_variable_creator arguments arg:hosts type:List[Text] FunctionDef name:sharded_variable_creator arguments arg:next_creator type:Callable[..., tf_variables.Variable] vararg:args kwarg:kwargs Assign Assign Call call:len Assign Call call:extract_variable_info Assign Assign Assign Assign Assign Assign Assign Assign Compare op:In Assign Assign For Call call:enumerate If Compare op:Eq With Assign Call call:format Assign If Assign Call call:ShardInfo Assign Call call:partial Assign Call call:partial Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "view_to_reshape", - "source_code": "def view_to_reshape(gm): subgraph_names: OrderedSet[str] = OrderedSet((x.target for x in gm.graph.find_nodes(op = 'get_attr'))) for child_name, child_mod in gm.named_children(): if child_name in subgraph_names and isinstance(child_mod, torch.fx.GraphModule): view_to_reshape(child_mod) for nd in gm.graph.find_nodes(op = 'call_function', target = torch.ops.aten.view.default): nd.target = torch.ops.aten.reshape.default", - "docstring": "Replace view ops in the GraphModule to reshape ops.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py", - "ast_data": "FunctionDef name:view_to_reshape arguments arg:gm For Call call:named_children If BoolOp Compare op:In Call call:isinstance For Call call:find_nodes Assign" - }, - { - "library": "flexx", - "name": "EnumProp", - "source_code": "class EnumProp(Property): _default = '' def _consume_args(self, options, *args): if not isinstance(options, (list, tuple)): raise TypeError('EnumProp needs list of options') if not all([isinstance(i, str) for i in options]): raise TypeError('EnumProp options must be str') if not args: args = (options[0],) self._set_data([option.upper() for option in options]) super()._consume_args(*args) def _validate(self, value, name, data): if not isinstance(value, str): raise TypeError('EnumProp %r value must be str.' % name) value = value.upper() if value.upper() not in data: raise ValueError('Invalid value for enum %r: %s' % (name, value)) return value", - "docstring": "A property that represents a choice between a fixed set of (string) values. Useage: ``. If no initial value is provided, the first option is used.", - "type": "class", - "file_path": "flexx\\flexx\\event\\_property.py", - "ast_data": "ClassDef name:EnumProp Assign FunctionDef name:_consume_args arguments arg:self arg:options vararg:args If Raise raises:TypeError('EnumProp needs list of options') If Raise raises:TypeError('EnumProp options must be str') If Assign FunctionDef name:_validate arguments arg:self arg:value arg:name arg:data If Raise raises:TypeError('EnumProp %r value must be str.' 
% name) Assign Call call:upper If Compare op:NotIn Raise raises:ValueError('Invalid value for enum %r: %s' % (name, value)) Return return:yes" - }, - { - "library": "django", - "name": "get_post_parameters", - "source_code": "def get_post_parameters(self, request): if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters = = '__ALL__': for k in cleansed: cleansed[k] = self.cleansed_substitute return cleansed else: for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = self.cleansed_substitute return cleansed else: return request.POST", - "docstring": "Replace the values of POST parameters marked as sensitive with stars (*********).", - "type": "method", - "file_path": "django\\django\\views\\debug.py", - "ast_data": "FunctionDef name:get_post_parameters arguments arg:self arg:request If Compare op:Is Return return:yes Assign Call call:getattr If BoolOp Call call:is_active Assign Call call:copy If Compare op:Eq For Assign Return return:yes For If Compare op:In Assign Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "@available_if(_search_estimator_has('decision_function')) def decision_function(self, X): check_is_fitted(self) return self.best_estimator_.decision_function(X)", - "docstring": "Call decision_function on the estimator with the best found parameters. Only available if `X` based on the estimator with the best found parameters.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Call call:available_if Return return:yes" - }, - { - "library": "scikit-learn", - "name": "split", - "source_code": "def split(self, X, y = None, groups = None): return super().split(X, y, groups)", - "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,), default=None The target variable for supervised learning problems. groups : array-like of shape (n_samples,) Group labels for the samples used while splitting the dataset into train/test set. Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split. Notes ----- Randomized CV splitters may return different results for each call of split. 
You can make the results identical by setting to an integer.", "type": "method", "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", "ast_data": "FunctionDef name:split arguments arg:self arg:X arg:y arg:groups Return return:yes" }, { "library": "pytorch", "name": "InconsistentMetadata", "source_code": "class InconsistentMetadata(Exception): pass", "docstring": "Exception that is thrown when AutoHeuristic tries to log data to a file where the metadata stored in the file does not match the metadata it would store if the file didn't exist.", "type": "class", "file_path": "pytorch\\torch\\_inductor\\autoheuristic\\autoheuristic.py", "ast_data": "ClassDef name:InconsistentMetadata" }, { "library": "pytorch", "name": "flip_cutlass_layout", "source_code": "@staticmethod def flip_cutlass_layout(cutlass_layout: 'cutlass_lib.LayoutType') -> 'cutlass_lib.LayoutType': assert cutlass_utils.try_import_cutlass() import cutlass_library.library as cutlass_lib if cutlass_layout == cutlass_lib.LayoutType.RowMajor: return cutlass_lib.LayoutType.ColumnMajor else: return cutlass_lib.LayoutType.RowMajor", "docstring": "Helper method: Flips a given cutlass layout (cutlass_lib.LayoutType) from RowMajor to ColumnMajor or vice versa", "type": "method", "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\gemm_template.py", "ast_data": "FunctionDef name:flip_cutlass_layout arguments arg:cutlass_layout type:'cutlass_lib.LayoutType' If Compare op:Eq Return return:yes Return return:yes" }, { "library": "django", "name": "__init__", "source_code": "def __init__(self, producer, length = None): self._producer = producer self._empty = False self._leftover = b'' self.length = length self.position = 0 self._remaining = length self._unget_history = []", "docstring": "Every LazyStream must have a producer when instantiated. A producer is an iterable that returns a string each time it is called.", "type": "method", "file_path": "django\\django\\http\\multipartparser.py", "ast_data": "FunctionDef name:__init__ arguments arg:self arg:producer arg:length Assign Assign Assign Assign Assign Assign Assign" }, { "library": "authlib", "name": "validate_requested_scope", "source_code": "def validate_requested_scope(self, scope): if scope and self.scopes_supported: scopes = set(scope_to_list(scope)) if not set(self.scopes_supported).issuperset(scopes): raise InvalidScopeError()", "docstring": "Validate if requested scope is supported by Authorization Server. Developers CAN re-write this method to meet your needs.", "type": "method", "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", "ast_data": "FunctionDef name:validate_requested_scope arguments arg:self arg:scope If BoolOp Assign Call call:set If Raise raises:InvalidScopeError()" }, { "library": "tensorflow", "name": "get_optimizer_experimental_options", "source_code": "def get_optimizer_experimental_options(self): rewrite_options = self.config.graph_options.rewrite_options options = {} def rewriter_toggle(option): attr = getattr(rewrite_options, option) if attr !=
0: options[option] = attr == rewriter_config_pb2.RewriterConfig.ON def rewriter_bool(option): options[option] = getattr(rewrite_options, option) rewriter_toggle('layout_optimizer') rewriter_toggle('constant_folding') rewriter_toggle('shape_optimization') rewriter_toggle('remapping') rewriter_toggle('arithmetic_optimization') rewriter_toggle('dependency_optimization') rewriter_toggle('loop_optimization') rewriter_toggle('function_optimization') rewriter_toggle('debug_stripper') rewriter_bool('disable_model_pruning') rewriter_toggle('scoped_allocator_optimization') rewriter_toggle('pin_to_host_optimization') rewriter_toggle('implementation_selector') rewriter_toggle('auto_mixed_precision') rewriter_toggle('use_plugin_optimizers') rewriter_bool('disable_meta_optimizer') rewriter_toggle('auto_mixed_precision_onednn_bfloat16') rewriter_toggle('auto_mixed_precision_mkl') if rewrite_options.min_graph_nodes != 0: options['min_graph_nodes'] = rewrite_options.min_graph_nodes return options", "docstring": "Get experimental options for the optimizer. Returns: Dictionary of current option values", "type": "method", "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", "ast_data": "FunctionDef name:get_optimizer_experimental_options arguments arg:self Assign Assign FunctionDef name:rewriter_toggle arguments arg:option Assign Call call:getattr If Compare op:NotEq Assign Compare op:Eq FunctionDef name:rewriter_bool arguments arg:option Assign Call call:getattr If Compare op:NotEq Assign Return return:yes" }, { "library": "tensorflow", "name": "colocate_vars_with", "source_code": "@doc_controls.do_not_doc_inheritable @deprecated(None, 'use extended.colocate_vars_with() instead.') def colocate_vars_with(self, colocate_with_variable): return self._extended.colocate_vars_with(colocate_with_variable)", "docstring": "DEPRECATED: use extended.colocate_vars_with() instead.", "type": "method", "file_path": "tensorflow\\tensorflow\\python\\distribute\\distribute_lib.py", "ast_data": "FunctionDef name:colocate_vars_with arguments arg:self arg:colocate_with_variable Call call:deprecated Return return:yes" }, { "library": "pandas", "name": "__call__", "source_code": "def __call__(self, x, pos: int | None = 0) -> str: fmt = '%H:%M:%S.%f' s = int(x) msus = round((x - s) * 10 ** 6) ms = msus // 1000 us = msus % 1000 m, s = divmod(s, 60) h, m = divmod(m, 60) _, h = divmod(h, 24) if us != 0: return pydt.time(h, m, s, msus).strftime(fmt) elif ms != 0: return pydt.time(h, m, s, msus).strftime(fmt)[:-3] elif s != 0: return pydt.time(h, m, s).strftime('%H:%M:%S') return pydt.time(h, m).strftime('%H:%M')", "docstring": "Return the time of day as a formatted string. Parameters ---------- x : float The time of day specified as seconds since 00:00 (midnight), with up to microsecond precision. pos Unused Returns ------- str A string in HH:MM:SS.mmmuuu format.
Microseconds, milliseconds and seconds are only displayed if non-zero.", - "type": "method", - "file_path": "pandas\\pandas\\plotting\\_matplotlib\\converter.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:x arg:pos type:int | None Assign Assign Call call:int Assign Call call:round Assign Assign Assign Call call:divmod Assign Call call:divmod Assign Call call:divmod If Compare op:NotEq Return return:yes If Compare op:NotEq Return return:yes If Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "predict", - "source_code": "def predict(self, model, df, feature_columns): predictions = model.predict(df[feature_columns]) proba = model.predict_proba(df[feature_columns]) leaf_ids = model.apply(df[feature_columns]) return (predictions, proba, leaf_ids)", - "docstring": "Returns the predictions, probabilities, and leaf ids for a given dataframe.", - "type": "method", - "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:model arg:df arg:feature_columns Assign Call call:predict Assign Call call:predict_proba Assign Call call:apply Return return:yes" - }, - { - "library": "matplotlib", - "name": "transform_path", - "source_code": "def transform_path(self, path): return self.transform_path_affine(self.transform_path_non_affine(path))", - "docstring": "Apply the transform to *path*, returning a new . In some cases, this transform may insert curves into the path that began as line segments.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:transform_path arguments arg:self arg:path Return return:yes" - }, - { - "library": "scipy", - "name": "newer", - "source_code": "def newer(source, target): if not os.path.exists(source): raise ValueError(f\"file '{os.path.abspath(source)}' does not exist\") if not os.path.exists(target): return 1 mtime1 = os.stat(source)[ST_MTIME] mtime2 = os.stat(target)[ST_MTIME] return mtime1 > mtime2", - "docstring": "Return true if 'source' exists and is more recently modified than 'target', or if 'source' exists and 'target' doesn't. Return false if both exist and 'target' is the same age or younger than 'source'.", - "type": "function", - "file_path": "scipy\\scipy\\special\\utils\\makenpz.py", - "ast_data": "FunctionDef name:newer arguments arg:source arg:target If Raise raises:ValueError(f\"file '{os.path.abspath(source)}' does not exist\") If Return return:yes Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "comment", - "source_code": "def comment(self, comment): self.__flush() self.__write(self.__indentation[: len(self.__tags)]) self.__write(f'\\n')", - "docstring": "Add a comment to the output stream. Parameters ---------- comment : str Comment text.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py", - "ast_data": "FunctionDef name:comment arguments arg:self arg:comment" - }, - { - "library": "tensorflow", - "name": "use_test_undeclared_outputs_dir", - "source_code": "def use_test_undeclared_outputs_dir(self): return self.is_flag_on(FLAG_NAME_USE_TEST_UNDECLARED_OUTPUTS_DIR)", - "docstring": "Decides the output directory of the report and trace files. Args: None. 
Returns: True if the output files should be written to the test-undeclared-outputs-directory defined via an env variable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer_flags.py", - "ast_data": "FunctionDef name:use_test_undeclared_outputs_dir arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "run", - "source_code": "def run(self, grid_x: int, grid_y: int, grid_z: int, stream: int, *args: Unpack[tuple[object, ...]]) -> None: from torch._C import _StaticCudaLauncher assert self.function is not None if self.has_global_scratch: arg_tys = self.arg_tys + 'O' args = (*args, None) else: arg_tys = self.arg_tys assert len(args) = = len(arg_tys) _StaticCudaLauncher._launch_kernel(self.function, grid_x, grid_y, grid_z, self.num_warps, self.shared, arg_tys, args, stream)", - "docstring": "Actually run the kernel at runtime. This function is the hot codepath.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\runtime\\static_cuda_launcher.py", - "ast_data": "FunctionDef name:run arguments arg:self arg:grid_x type:int arg:grid_y type:int arg:grid_z type:int arg:stream type:int vararg:args If Assign Assign Assign" - }, - { - "library": "uvicorn", - "name": "ws_handler", - "source_code": "async def ws_handler(self, protocol: WebSocketServerProtocol, path: str) -> Any: self.handshake_completed_event.set() await self.wait_closed()", - "docstring": "This is the main handler function for the 'websockets' implementation to call into. We just wait for close then return, and instead allow 'send' and 'receive' events to drive the flow.", - "type": "method", - "file_path": "uvicorn\\uvicorn\\protocols\\websockets\\websockets_impl.py", - "ast_data": "AsyncFunctionDef name:ws_handler arguments arg:self arg:protocol type:WebSocketServerProtocol arg:path type:str" - }, - { - "library": "scikit-learn", - "name": "EstimatorCheckFailedWarning", - "source_code": "class EstimatorCheckFailedWarning(UserWarning): def __init__(self, *, estimator, check_name: str, exception: Exception, status: str, expected_to_fail: bool, expected_to_fail_reason: str): self.estimator = estimator self.check_name = check_name self.exception = exception self.status = status self.expected_to_fail = expected_to_fail self.expected_to_fail_reason = expected_to_fail_reason def __repr__(self): expected_to_fail_str = f'Expected to fail: {self.expected_to_fail_reason}' if self.expected_to_fail else 'Not expected to fail' return f'Test {self.check_name} failed for estimator {self.estimator!r}.\\nExpected to fail reason: {expected_to_fail_str}\\nException: {self.exception}' def __str__(self): return self.__repr__()", - "docstring": "Warning raised when an estimator check from the common tests fails. Parameters ---------- estimator : estimator object Estimator instance for which the test failed. check_name : str Name of the check that failed. exception : Exception Exception raised by the failed check. status : str Status of the check. expected_to_fail : bool Whether the check was expected to fail. 
expected_to_fail_reason : str Reason for the expected failure.", - "type": "class", - "file_path": "scikit-learn\\sklearn\\exceptions.py", - "ast_data": "ClassDef name:EstimatorCheckFailedWarning FunctionDef name:__init__ arguments arg:self Assign Assign Assign Assign Assign Assign FunctionDef name:__repr__ arguments arg:self Assign Return return:yes FunctionDef name:__str__ arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "set_matlab_compatible", - "source_code": "def set_matlab_compatible(self): self.mat_dtype = True self.squeeze_me = False self.chars_as_strings = False", - "docstring": "Sets options to return arrays as MATLAB loads them", - "type": "method", - "file_path": "scipy\\scipy\\io\\matlab\\_miobase.py", - "ast_data": "FunctionDef name:set_matlab_compatible arguments arg:self Assign Assign Assign" - }, - { - "library": "django", - "name": "format_html", - "source_code": "def format_html(format_string, *args, **kwargs): if not (args or kwargs): raise TypeError('args or kwargs must be provided.') args_safe = map(conditional_escape, args) kwargs_safe = {k: conditional_escape(v) for k, v in kwargs.items()} return mark_safe(format_string.format(*args_safe, **kwargs_safe))", - "docstring": "Similar to str.format, but pass all arguments through conditional_escape(), and call mark_safe() on the result. This function should be used instead of str.format or % interpolation to build up small HTML fragments.", - "type": "function", - "file_path": "django\\django\\utils\\html.py", - "ast_data": "FunctionDef name:format_html arguments arg:format_string vararg:args kwarg:kwargs If Raise raises:TypeError('args or kwargs must be provided.') Assign Call call:map Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_offset", - "source_code": "@_compat_get_offset def get_offset(self, bbox, renderer): return self._offset(bbox.width, bbox.height, -bbox.x0, -bbox.y0, renderer) if callable(self._offset) else self._offset", - "docstring": "Return the offset as a tuple (x, y). The extent parameters have to be provided to handle the case where the offset is dynamically determined by a callable (see ). 
Parameters ---------- bbox : renderer : subclass", "type": "method", "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", "ast_data": "FunctionDef name:get_offset arguments arg:self arg:bbox arg:renderer Return return:yes" }, { "library": "tensorflow", "name": "dynamic_partition", "source_code": "@dispatch.dispatch_for_api(data_flow_ops.dynamic_partition) def dynamic_partition(data: ragged_tensor.RaggedOrDense, partitions: ragged_tensor.RaggedOrDense, num_partitions, name = None): if not isinstance(num_partitions, int) or num_partitions < 0: raise TypeError('num_partitions must be a non-negative integer') result = stack_dynamic_partitions(data, partitions, num_partitions, name) return [result[i] for i in range(num_partitions)]", "docstring": "RaggedTensor dispatch override for tf.dynamic_partition.", "type": "function", "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", "ast_data": "FunctionDef name:dynamic_partition arguments arg:data type:ragged_tensor.RaggedOrDense arg:partitions type:ragged_tensor.RaggedOrDense arg:num_partitions arg:name Call call:dispatch_for_api If BoolOp Compare op:Lt Raise raises:TypeError('num_partitions must be a non-negative integer') Assign Call call:stack_dynamic_partitions Return return:yes" }, { "library": "scipy", "name": "f", "source_code": "@property def f(self) -> np.ndarray: if self.fft_mode in {'onesided', 'onesided2X'}: return fft_lib.rfftfreq(self.mfft, self.T) elif self.fft_mode == 'twosided': return fft_lib.fftfreq(self.mfft, self.T) elif self.fft_mode == 'centered': return fft_lib.fftshift(fft_lib.fftfreq(self.mfft, self.T)) fft_modes = get_args(FFT_MODE_TYPE) raise RuntimeError(f'self.fft_mode={self.fft_mode!r} not in {fft_modes}!')", "docstring": "Frequencies values of the STFT. A 1d array of length with spaced entries is returned. See Also -------- delta_f: Width of the frequency bins of the STFT. f_pts: Number of points along the frequency axis. mfft: Length of the input for FFT used. ShortTimeFFT: Class this property belongs to.", "type": "method", "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", "ast_data": "FunctionDef name:f arguments arg:self If Compare op:In Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Assign Call call:get_args Raise raises:RuntimeError(f'self.fft_mode={self.fft_mode!r} not in {fft_modes}!')" }, { "library": "tensorflow", "name": "sigmoid", "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def sigmoid(x): return nn.sigmoid(x)", "docstring": "Element-wise sigmoid. Args: x: A tensor or variable. Returns: A tensor.", "type": "function", "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", "ast_data": "FunctionDef name:sigmoid arguments arg:x Return return:yes" }, { "library": "pytorch", "name": "apply_patch", "source_code": "def apply_patch(patch_file: str, target_dir: Optional[str], strip_count: int) -> None: if target_dir: print(f'Applying patch in directory: {target_dir}') else: print('No target directory specified.
Using PyTorch installation path.') print(f'Applying patch with strip count: {strip_count}') try: patch_command = ['patch', f'-p{strip_count}', '-i', patch_file] if target_dir: patch_command.insert(1, f'-d{target_dir}') print(f'Running command: {' '.join(patch_command)}') result = subprocess.run(patch_command, capture_output = True, text = True) else: patch_command.insert(1, f'-d{target_dir}') print(f'Running command: {' '.join(patch_command)}') result = subprocess.run(patch_command, capture_output = True, text = True) if result.returncode ! = 0: print('Failed to apply patch.') print('Patch output: ') print(result.stdout) print(result.stderr) sys.exit(1) else: print('Patch applied successfully.') except FileNotFoundError: print(\"Error: The 'patch' utility is not installed or not found in PATH.\") sys.exit(1) except Exception as e: print(f'An error occurred while applying the patch: {e}') sys.exit(1)", - "docstring": "Applies the downloaded patch to the specified directory using the given strip count. Args: patch_file (str): The path to the patch file. target_dir (Optional[str]): The directory to apply the patch to. If None, uses PyTorch installation path. strip_count (int): The number of leading directories to strip from file paths in the patch. Exits: If the patch command fails or the 'patch' utility is not available, the script will exit.", - "type": "function", - "file_path": "pytorch\\tools\\nightly_hotpatch.py", - "ast_data": "FunctionDef name:apply_patch arguments arg:patch_file type:str arg:target_dir type:Optional[str] arg:strip_count type:int If Try Assign If Assign Call call:run Assign Call call:run If Compare op:NotEq ExceptHandler ExceptHandler" - }, - { - "library": "scipy", - "name": "get_residual", - "source_code": "def get_residual(self): return self._data[10]", - "docstring": "Return weighted sum of squared residuals of the spline approximation. This is equivalent to:: sum((w[i] * (y[i]-spl(x[i])))**2, axis=0)", - "type": "method", - "file_path": "scipy\\scipy\\interpolate\\_fitpack2.py", - "ast_data": "FunctionDef name:get_residual arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_range_tensor", - "source_code": "def is_range_tensor(t): return tensor_util.is_tf_type(t) and hasattr(t, 'op') and (t.op.type = = 'Range')", - "docstring": "Returns True if a tensor is the result of a tf.range op. Best effort.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\utils\\tensors.py", - "ast_data": "FunctionDef name:is_range_tensor arguments arg:t Return return:yes" - }, - { - "library": "matplotlib", - "name": "bin_path", - "source_code": "@classmethod def bin_path(cls): return str(mpl.rcParams[cls._exec_key])", - "docstring": "Return the binary path to the commandline tool used by a specific subclass. This is a class method so that the tool can be looked for before making a particular MovieWriter subclass available.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\animation.py", - "ast_data": "FunctionDef name:bin_path arguments arg:cls Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict", - "source_code": "def predict(self, X): check_is_fitted(self) X = validate_data(self, X, reset = False) return self._estimate_weighted_log_prob(X).argmax(axis = 1)", - "docstring": "Predict the labels for the data samples in X using trained model. Parameters ---------- X : array-like of shape (n_samples, n_features) List of n_features-dimensional data points. 
Each row corresponds to a single data point. Returns ------- labels : array, shape (n_samples,) Component labels.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:X Assign Call call:validate_data Return return:yes" - }, - { - "library": "cherrypy", - "name": "modules", - "source_code": "def modules(modulePath): __import__(modulePath) return sys.modules[modulePath]", - "docstring": "Load a module and retrieve a reference to that module.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\reprconf.py", - "ast_data": "FunctionDef name:modules arguments arg:modulePath Return return:yes" - }, - { - "library": "pytorch", - "name": "forward", - "source_code": "def forward(self, *args: Any, **kwargs: Any) -> Any: handle = self._handle with torch.autograd.profiler.record_function('FullyShardedDataParallel.forward'): args, kwargs = _root_pre_forward(self, self, args, kwargs) unused = None args, kwargs = _pre_forward(self, handle, _pre_forward_unshard, self._fsdp_wrapped_module, args, kwargs) if handle: _p_assert(handle.flat_param.device = = self.compute_device, f'Expected `FlatParameter` to be on the compute device {self.compute_device} but got {handle.flat_param.device}') output = self._fsdp_wrapped_module(*args, **kwargs) return _post_forward(self, handle, _post_forward_reshard, self, unused, output)", - "docstring": "Run the forward pass for the wrapped module, inserting FSDP-specific pre- and post-forward sharding logic.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", - "ast_data": "FunctionDef name:forward arguments arg:self vararg:args kwarg:kwargs Assign With Assign Call call:_root_pre_forward Assign Assign Call call:_pre_forward If Assign Call call:_fsdp_wrapped_module Return return:yes" - }, - { - "library": "tensorflow", - "name": "handle", - "source_code": "@property def handle(self): tpu_context = tpu_util.enclosing_tpu_context() if tpu_context is None or context.executing_eagerly(): var = self._get_on_device_or_primary() if isinstance(var, packed.PackedVarAndDevice): return var.on_device_handle() else: return var.handle else: is_packed = self._packed_var is not None val = self._values if is_packed: val = [self._packed_var] return tpu_context.get_replicated_var_handle(self._common_name, self._handle_id, val, self._is_mirrored(), is_packed)", - "docstring": "The handle by which this variable can be accessed.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_values.py", - "ast_data": "FunctionDef name:handle arguments arg:self Assign Call call:enclosing_tpu_context If BoolOp Compare op:Is Call call:executing_eagerly Assign Call call:_get_on_device_or_primary If Call call:isinstance Return return:yes Return return:yes Assign Compare op:IsNot Assign If Assign Return return:yes" - }, - { - "library": "mongo", - "name": "timeout", - "source_code": "@property def timeout(self) -> bool: return False", - "docstring": "True if this error was caused by a timeout. .. 
versionadded:: 4.2", - "type": "method", - "file_path": "mongo\\pymongo\\errors.py", - "ast_data": "FunctionDef name:timeout arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "embedding_strategy", - "source_code": "@register_op_strategy(aten.embedding.default) def embedding_strategy(op_schema: OpSchema) -> StrategyType: weight_strategy = cast(OpStrategy, op_schema.args_schema[0]) indices_strategy = cast(OpStrategy, op_schema.args_schema[1]) mesh = op_schema.get_mesh_from_args() weight_shape = weight_strategy.shape indices_shape = indices_strategy.shape output_emd_dim = len(indices_shape) single_mesh_dim_strategies = [] all_replicate: PlacementList = [Replicate()] * 3 single_mesh_dim_strategies.append(all_replicate) colwise_sharding: PlacementList = [Shard(output_emd_dim), Shard(1), Replicate()] single_mesh_dim_strategies.append(colwise_sharding) embedding_partial_placement = _MaskPartial(offset_shape = weight_shape, offset_dim = 0) rowwise_sharding: PlacementList = [embedding_partial_placement, Shard(0), embedding_partial_placement] single_mesh_dim_strategies.append(rowwise_sharding) for input_dim in range(len(indices_shape)): batch_sharding: PlacementList = [Shard(input_dim), Replicate(), Shard(input_dim)] single_mesh_dim_strategies.append(batch_sharding) return expand_to_full_mesh_op_strategy(mesh, op_schema, single_mesh_dim_strategies)", - "docstring": "This strategy handles embedding op. We have two possible embedding shardings: rowwise and colwise", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\_embedding_ops.py", - "ast_data": "FunctionDef name:embedding_strategy arguments arg:op_schema type:OpSchema Call call:register_op_strategy Assign Call call:cast Assign Call call:cast Assign Call call:get_mesh_from_args Assign Assign Assign Call call:len Assign Assign Call call:_MaskPartial For Call call:range Return return:yes" - }, - { - "library": "pytorch", - "name": "generate_equalization_mapping", - "source_code": "def generate_equalization_mapping(self) -> QConfigMapping: detector_qconfig_info_combined = self._generate_module_fqn_to_detector_info_mapping(self._update_detector_equalization_qconfig_info) mapping: QConfigMapping = self._generate_qconfig_mapping_helper(detector_qconfig_info_combined, self._equalization_config_generator) return mapping", - "docstring": "Generates a QConfigMapping based on the suggestions of the ModelReport API for equalization. The generated mapping encompasses all the different types of feedback from the input-weight equalization detector. These configs are based on the suggestions provided by the ModelReport API and can only be generated once the reports have been generated. Returns a QConfigMapping for the equalization configuration", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_model_report\\model_report.py", - "ast_data": "FunctionDef name:generate_equalization_mapping arguments arg:self Assign Call call:_generate_module_fqn_to_detector_info_mapping Return return:yes" - }, - { - "library": "pandas", - "name": "cummin", - "source_code": "@final @Substitution(name = 'groupby') @Substitution(see_also = _common_see_also) def cummin(self, numeric_only: bool = False, **kwargs) -> NDFrameT: skipna = kwargs.get('skipna', True) return self._cython_transform('cummin', numeric_only = numeric_only, skipna = skipna)", - "docstring": "Cumulative min for each group. Parameters ---------- numeric_only : bool, default False Include only , or data. 
**kwargs : dict, optional Additional keyword arguments to be passed to the function, such as , to control whether NA/null values are ignored. Returns ------- Series or DataFrame Cumulative min for each group. Same object type as the caller. %(see_also)s Examples -------- For SeriesGroupBy: >>> lst = [\"a\", \"a\", \"a\", \"b\", \"b\", \"b\"] >>> ser = pd.Series([1, 6, 2, 3, 0, 4], index=lst) >>> ser a 1 a 6 a 2 b 3 b 0 b 4 dtype: int64 >>> ser.groupby(level=0).cummin() a 1 a 1 a 1 b 3 b 0 b 0 dtype: int64 For DataFrameGroupBy: >>> data = [[1, 0, 2], [1, 1, 5], [6, 6, 9]] >>> df = pd.DataFrame( ... data, columns=[\"a\", \"b\", \"c\"], index=[\"snake\", \"rabbit\", \"turtle\"] ... ) >>> df a b c snake 1 0 2 rabbit 1 1 5 turtle 6 6 9 >>> df.groupby(\"a\").groups {1: ['snake', 'rabbit'], 6: ['turtle']} >>> df.groupby(\"a\").cummin() b c snake 0 2 rabbit 0 2 turtle 6 9", "type": "method", "file_path": "pandas\\pandas\\core\\groupby\\groupby.py", "ast_data": "FunctionDef name:cummin arguments arg:self arg:numeric_only type:bool kwarg:kwargs Call call:Substitution Call call:Substitution Assign Call call:get Return return:yes" }, { "library": "scipy", "name": "integrate", "source_code": "def integrate(self, a, b, extrapolate = None): ib = self.antiderivative() if extrapolate is None: extrapolate = self.extrapolate if extrapolate != 'periodic': ib.extrapolate = extrapolate if extrapolate == 'periodic': if a <= b: sign = 1 else: a, b = (b, a) sign = -1 xs, xe = (self.x[0], self.x[-1]) period = xe - xs interval = b - a n_periods, left = divmod(interval, period) res = n_periods * (ib(xe) - ib(xs)) a = xs + (a - xs) % period b = a + left if b <= xe: res += ib(b) - ib(a) else: res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs) return sign * res else: return ib(b) - ib(a)", "docstring": "Compute a definite integral over a piecewise polynomial. Parameters ---------- a : float Lower integration bound b : float Upper integration bound extrapolate : {bool, 'periodic', None}, optional Whether to extrapolate to out-of-bounds points based on first and last intervals, or to return NaNs. If 'periodic', periodic extrapolation is used. If None (default), use .
Returns ------- array_like Definite integral of the piecewise polynomial over [a, b]", - "type": "method", - "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", - "ast_data": "FunctionDef name:integrate arguments arg:self arg:a arg:b arg:extrapolate Assign Call call:antiderivative If Compare op:Is Assign If Compare op:NotEq Assign If Compare op:Eq If Compare op:LtE Assign Assign Assign Assign Assign Assign Assign Call call:divmod Assign Assign Assign If Compare op:LtE Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "use_wrapped_call", - "source_code": "def use_wrapped_call(layer, call_fn, default_training_value = None, return_method = False): expects_training_arg = layer_uses_training_bool(layer) if hasattr(call_fn, 'original_layer_call'): original_call = call_fn.original_layer_call call_fn = call_fn.__call__ else: original_call = call_fn fn, arg_spec = maybe_add_training_arg(original_call, call_fn, expects_training_arg, default_training_value) def return_outputs_and_add_losses(*args, **kwargs): if return_method: args = args[1:] outputs, losses = fn(*args, **kwargs) layer.add_loss(losses, inputs = True) if context.executing_eagerly(): for i in layer._flatten_layers(): if i is not layer: i._eager_losses = [base_layer_utils.REVIVED_LOSS_PLACEHOLDER] return outputs decorated = tf_decorator.make_decorator(target = call_fn, decorator_func = return_outputs_and_add_losses, decorator_argspec = arg_spec) if return_method: return types.MethodType(decorated, layer) else: return decorated", - "docstring": "Creates fn that adds the losses returned by call_fn & returns the outputs. Args: layer: A Keras layer object call_fn: tf.function that takes layer inputs (and possibly a training arg), and returns a tuple of (outputs, list of losses). default_training_value: Default value of the training kwarg. If , the default is . return_method: Whether to return a method bound to the layer. Returns: function that calls call_fn and returns the outputs. Losses returned by call_fn are added to the layer losses.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\utils.py", - "ast_data": "FunctionDef name:use_wrapped_call arguments arg:layer arg:call_fn arg:default_training_value arg:return_method Assign Call call:layer_uses_training_bool If Call call:hasattr Assign Assign Assign Assign Call call:maybe_add_training_arg FunctionDef name:return_outputs_and_add_losses arguments vararg:args kwarg:kwargs If Assign Assign Call call:fn If Call call:executing_eagerly For Call call:_flatten_layers If Compare op:IsNot Assign Return return:yes Assign Call call:make_decorator If Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "nms3d", - "source_code": "def nms3d(input: Tensor, kernel_size: tuple[int, int, int], mask_only: bool = False) -> Tensor: return NonMaximaSuppression3d(kernel_size)(input, mask_only)", - "docstring": "Apply non maxima suppression to filter. 
See :class: for details.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\subpix\\nms.py", - "ast_data": "FunctionDef name:nms3d arguments arg:input type:Tensor arg:kernel_size type:tuple[int, int, int] arg:mask_only type:bool Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_depthshade", - "source_code": "def set_depthshade(self, depthshade, depthshade_minalpha = None): if depthshade_minalpha is None: depthshade_minalpha = rcParams['axes3d.depthshade_minalpha'] self._depthshade = depthshade self._depthshade_minalpha = depthshade_minalpha self.stale = True", - "docstring": "Set whether depth shading is performed on collection members. Parameters ---------- depthshade : bool Whether to shade the patches in order to give the appearance of depth. depthshade_minalpha : float Sets the minimum alpha value used by depth-shading. .. versionadded:: 3.11", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", - "ast_data": "FunctionDef name:set_depthshade arguments arg:self arg:depthshade arg:depthshade_minalpha If Compare op:Is Assign Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "reapply_all_patches", - "source_code": "def reapply_all_patches(self): for patch in self.patches_made: patch.patch() return self.patches_made", - "docstring": "Patch all the stored patcheds. It doesn't modify patches_made.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", - "ast_data": "FunctionDef name:reapply_all_patches arguments arg:self For Return return:yes" - }, - { - "library": "tensorflow", - "name": "assign_sub", - "source_code": "def assign_sub(self, delta, use_locking = None, name = None, read_value = True): with _handle_graph(self.handle), self._assign_dependencies(): assign_sub_op = gen_resource_variable_ops.assign_sub_variable_op(self.handle, ops.convert_to_tensor(delta, dtype = self.dtype), name = name) if read_value: return self._lazy_read(assign_sub_op) return assign_sub_op", - "docstring": "Subtracts a value from this variable. Args: delta: A . The value to subtract from this variable. use_locking: If , use locking during the operation. name: The name to use for the operation. read_value: A . Whether to read and return the new value of the variable or not. Returns: If is , this method will return the new value of the variable after the assignment has completed. Otherwise, when in graph mode it will return the that does the assignment, and when in eager mode it will return .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:assign_sub arguments arg:self arg:delta arg:use_locking arg:name arg:read_value With Assign Call call:assign_sub_variable_op If Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "ChebyshevQuadrature", - "source_code": "class ChebyshevQuadrature(LSQBenchmarkProblem): INITIAL_GUESSES = [(1 + np.arange(11, dtype = float)) / 12] def __init__(self, x0): super().__init__(11, 11, 0.002799761, x0) cp = Chebyshev(1) self.T_all = [cp.basis(i, domain = [0.0, 1.0]) for i in range(11)] def fun(self, x): f = np.empty(self.n) for i in range(self.m): T = self.T_all[i] f[i] = np.mean(T(x)) - T.integ(lbnd = 0.0)(1.0) return f def jac(self, x): J = np.empty((self.m, self.n)) for i in range(self.m): T = self.T_all[i] J[i] = T.deriv()(x) J / = self.n return J", - "docstring": "The problem of determining the optimal nodes of a quadrature formula with equal weights, [1]_. 
Number of variables --- 11, number of residuals --- 11, no bounds. .. [1] Brett M. Averick et al. \"The MINPACK-2 Test Problem Collection\", p. 30", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\lsq_problems.py", - "ast_data": "ClassDef name:ChebyshevQuadrature Assign FunctionDef name:__init__ arguments arg:self arg:x0 Assign Call call:Chebyshev Assign FunctionDef name:fun arguments arg:self arg:x Assign Call call:empty For Call call:range Assign Assign Return return:yes FunctionDef name:jac arguments arg:self arg:x Assign Call call:empty For Call call:range Assign Assign Call Return return:yes" - }, - { - "library": "pytorch", - "name": "dropout3d", - "source_code": "def dropout3d(input: Tensor, p: float = 0.5, training: bool = True, inplace: bool = False) -> Tensor: if has_torch_function_unary(input): return handle_torch_function(dropout3d, (input,), input, p = p, training = training, inplace = inplace) if p < 0.0 or p > 1.0: raise ValueError(f'dropout probability has to be between 0 and 1, but got {p}') inp_dim = input.dim() if inp_dim not in (4, 5): warn_msg = f'dropout3d: Received a {inp_dim}-D input to dropout3d, which is deprecated and will result in an error in a future release. To retain the behavior and silence this warning, please use dropout instead. Note that dropout3d exists to provide channel-wise dropout on inputs with 3 spatial dimensions, a channel dimension, and an optional batch dimension (i.e. 4D or 5D inputs).' warnings.warn(warn_msg) is_batched = inp_dim = = 5 if not is_batched: input = input.unsqueeze_(0) if inplace else input.unsqueeze(0) result = _VF.feature_dropout_(input, p, training) if inplace else _VF.feature_dropout(input, p, training) if not is_batched: result = result.squeeze_(0) if inplace else result.squeeze(0) return result", - "docstring": "Randomly zero out entire channels (a channel is a 3D feature map). For example, the :math:-th channel of the :math:-th sample in the batched input is a 3D tensor :math: of the input tensor. Each channel will be zeroed out independently on every forward call with probability :attr: using samples from a Bernoulli distribution. See :class: for details. Args: p: probability of a channel to be zeroed. 
Default: 0.5 training: apply dropout if is ``", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:dropout3d arguments arg:input type:Tensor arg:p type:float arg:training type:bool arg:inplace type:bool If Call call:has_torch_function_unary Return return:yes If BoolOp Compare op:Lt Compare op:Gt Raise raises:ValueError(f'dropout probability has to be between 0 and 1, but got {p}') Assign Call call:dim If Compare op:NotIn Assign Assign Compare op:Eq If Assign Assign If Assign Return return:yes" - }, - { - "library": "numpy", - "name": "update", - "source_code": "def update(self, func, default = None, testing_value = None, missing_values = '', locked = False): self.func = func self._locked = locked if default is not None: self.default = default self.type = self._dtypeortype(self._getdtype(default)) else: try: tester = func(testing_value or '1') except (TypeError, ValueError): tester = None self.type = self._dtypeortype(self._getdtype(tester)) if missing_values is None: self.missing_values = set() else: if not np.iterable(missing_values): missing_values = [missing_values] if not all((isinstance(v, str) for v in missing_values)): raise TypeError('missing_values must be strings or unicode') self.missing_values.update(missing_values)", - "docstring": "Set StringConverter attributes directly. Parameters ---------- func : function Conversion function. default : any, optional Value to return by default, that is, when the string to be converted is flagged as missing. If not given, tries to supply a reasonable default value. testing_value : str, optional A string representing a standard input value of the converter. This string is used to help defining a reasonable default value. missing_values : {sequence of str, None}, optional Sequence of strings indicating a missing value. If `missing_valuesupdateStringConverterfuncdtypedtype_or_func` in the constructor does.", - "type": "method", - "file_path": "numpy\\numpy\\lib\\_iotools.py", - "ast_data": "FunctionDef name:update arguments arg:self arg:func arg:default arg:testing_value arg:missing_values arg:locked Assign Assign If Compare op:IsNot Assign Assign Call call:_dtypeortype Try Assign Call call:func ExceptHandler Assign Assign Call call:_dtypeortype If Compare op:Is Assign Call call:set If Assign If Raise raises:TypeError('missing_values must be strings or unicode')" - }, - { - "library": "tensorflow", - "name": "internal_operation_seed", - "source_code": "def internal_operation_seed(): return context()._internal_operation_seed()", - "docstring": "Returns the operation seed generated based on global seed.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:internal_operation_seed arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "set_key_value", - "source_code": "def set_key_value(self, metric: str, key: str, value: Any) -> None: if self._level = = 0: raise RuntimeError(f'Cannot set {metric} outside of a MetricsContext') if metric not in self._metrics: self._metrics[metric] = {} self._metrics[metric][key] = value", - "docstring": "Treats a give metric as a dictionary and set the k and value within it. Note that the metric must be a dictionary or not present. We allow this to be called multiple times (i.e. 
for features, it's not uncommon for them to be used multiple times within a single compilation).", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\metrics_context.py", - "ast_data": "FunctionDef name:set_key_value arguments arg:self arg:metric type:str arg:key type:str arg:value type:Any If Compare op:Eq Raise raises:RuntimeError(f'Cannot set {metric} outside of a MetricsContext') If Compare op:NotIn Assign Assign" - }, - { - "library": "pytorch", - "name": "dump_chrome_trace", - "source_code": "def dump_chrome_trace(f, input, trace_filename, optimize_ctx, activities, num_runs = 1, devices = None, kwargs_for_f = None, kwargs_for_profiler = None): if devices is None: devices = ['cuda'] global synchronize if devices ! = ['cpu'] and torch.cuda.is_available(): synchronize = torch.cuda.synchronize if kwargs_for_f is None: kwargs_for_f = {} if kwargs_for_profiler is None: kwargs_for_profiler = {} with optimize_ctx: torch.manual_seed(1337) for _ in range(5): f(input, **kwargs_for_f) synchronize() torch.manual_seed(1337) t0 = time.perf_counter() for _ in range(num_runs): f(input, **kwargs_for_f) synchronize() t1 = time.perf_counter() timing = t1 - t0 with profile(activities = activities, **kwargs_for_profiler) as prof: with optimize_ctx: synchronize() torch.manual_seed(1337) for _ in range(num_runs): f(input, **kwargs_for_f) synchronize() prof.export_chrome_trace(trace_filename) return timing", - "docstring": "Output the chrome trace of running f(input, **kwargs_for_f) with [optimize_ctx] [num_runs] times to [trace_filename]. [activities] are the activities that the profiler will record, e.g. ProfilerActivity.CUDA. Return total runtime without the profiler Outputs to trace_filename", - "type": "function", - "file_path": "pytorch\\torch\\_functorch\\benchmark_utils.py", - "ast_data": "FunctionDef name:dump_chrome_trace arguments arg:f arg:input arg:trace_filename arg:optimize_ctx arg:activities arg:num_runs arg:devices arg:kwargs_for_f arg:kwargs_for_profiler If Compare op:Is Assign If BoolOp Compare op:NotEq Call call:is_available Assign If Compare op:Is Assign If Compare op:Is Assign With For Call call:range Assign Call call:perf_counter For Call call:range Assign Call call:perf_counter Assign With With For Call call:range Return return:yes" - }, - { - "library": "django", - "name": "get_base_chain", - "source_code": "def get_base_chain(self, model): if not self.parents: return [] if model in self.parents: return [model] for parent in self.parents: res = parent._meta.get_base_chain(model) if res: res.insert(0, parent) return res return []", - "docstring": "Return a list of parent classes leading to (ordered from closest to most distant ancestor). This has to handle the case where is a grandparent or even more distant relation.", - "type": "method", - "file_path": "django\\django\\db\\models\\options.py", - "ast_data": "FunctionDef name:get_base_chain arguments arg:self arg:model If Return return:yes If Compare op:In Return return:yes For Assign Call call:get_base_chain If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "apply_shuffle_settings", - "source_code": "def apply_shuffle_settings(datapipe: DataPipe, shuffle: Optional[bool] = None) -> DataPipe: if shuffle is None: return datapipe graph = traverse_dps(datapipe) all_pipes = get_all_graph_pipes(graph) shufflers = [pipe for pipe in all_pipes if _is_shuffle_datapipe(pipe)] if not shufflers and shuffle: warnings.warn('`shuffle = True` was set, but the datapipe does not contain a `Shuffler`. 
Adding one at the end. Be aware that the default buffer size might not be sufficient for your task.') datapipe = datapipe.shuffle() shufflers = [datapipe] for shuffler in shufflers: shuffler.set_shuffle(shuffle) return datapipe", - "docstring": "Traverse the graph of `DataPipe` and no-op to the graph)", - "type": "function", - "file_path": "pytorch\\torch\\utils\\data\\graph_settings.py", - "ast_data": "FunctionDef name:apply_shuffle_settings arguments arg:datapipe type:DataPipe arg:shuffle type:Optional[bool] If Compare op:Is Return return:yes Assign Call call:traverse_dps Assign Call call:get_all_graph_pipes Assign If BoolOp Assign Call call:shuffle Assign For Return return:yes" - }, - { - "library": "numpy", - "name": "feature_test", - "source_code": "@_Cache.me def feature_test(self, name, force_flags = None, macros = []): if force_flags is None: force_flags = self.feature_flags(name) self.dist_log(\"testing feature '%s' with flags (%s)\" % (name, ' '.join(force_flags))) test_path = os.path.join(self.conf_check_path, 'cpu_%s.c' % name.lower()) if not os.path.exists(test_path): self.dist_fatal('feature test file is not exist', test_path) test = self.dist_test(test_path, force_flags + self.cc_flags['werror'], macros = macros) if not test: self.dist_log('testing failed', stderr = True) return test", - "docstring": "Test a certain CPU feature against the compiler through its own check file. Parameters ---------- name : str Supported CPU feature name. force_flags : list or None, optional If None(default), the returned flags from will be used. macros : list of tuples, optional A list of C macro definitions.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\ccompiler_opt.py", - "ast_data": "FunctionDef name:feature_test arguments arg:self arg:name arg:force_flags arg:macros If Compare op:Is Assign Call call:feature_flags Assign Call call:join If Assign Call call:dist_test If Return return:yes" - }, - { - "library": "pytorch", - "name": "info_dict", - "source_code": "def info_dict(self) -> dict[str, Union[PrimitiveInfoType, list[PrimitiveInfoType]]]: return {}", - "docstring": "Information returned here is logged to the autotune log file when that is enabled.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "FunctionDef name:info_dict arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "join_device", - "source_code": "@property @abstractmethod def join_device(self) -> torch.device: ...", - "docstring": "Return the device from which to perform collective communications needed by the join context manager.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\algorithms\\join.py", - "ast_data": "FunctionDef name:join_device arguments arg:self" - }, - { - "library": "pygame", - "name": "get_fonts", - "source_code": "def get_fonts(): initsysfonts() return list(Sysfonts)", - "docstring": "pygame.font.get_fonts() -> list get a list of system font names Returns the list of all found system fonts. Note that the names of the fonts will be all lowercase with spaces removed. 
This is how pygame internally stores the font names for matching.", - "type": "function", - "file_path": "pygame\\src_py\\sysfont.py", - "ast_data": "FunctionDef name:get_fonts arguments Return return:yes" - }, - { - "library": "scipy", - "name": "getcol", - "source_code": "def getcol(self, j): return self._getcol(j)", - "docstring": "Returns a copy of column j of the matrix, as an (m x 1) sparse matrix (column vector).", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_matrix.py", - "ast_data": "FunctionDef name:getcol arguments arg:self arg:j Return return:yes" - }, - { - "library": "coconut", - "name": "get_cache_path", - "source_code": "def get_cache_path(codepath): code_dir, code_fname = os.path.split(codepath) cache_dir = os.path.join(code_dir, coconut_cache_dir) ensure_dir(cache_dir, logger = logger) pickle_fname = code_fname + '.pkl' return os.path.join(cache_dir, pickle_fname)", - "docstring": "Get the cache filename to use for the given codepath.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:get_cache_path arguments arg:codepath Assign Call call:split Assign Call call:join Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "close", - "source_code": "def close(self): if not self._closed: self.flush() self._try_put(self._close_sentinel) self._internal_close()", - "docstring": "Flushes the event file to disk and close the file. Call this method when you do not need the summary writer anymore.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer.py", - "ast_data": "FunctionDef name:close arguments arg:self If" - }, - { - "library": "django", - "name": "flatten", - "source_code": "def flatten(fields): flat = [] for field in fields: if isinstance(field, (list, tuple)): flat.extend(field) else: flat.append(field) return flat", - "docstring": "Return a list which is a single level of flattening of the original list.", - "type": "function", - "file_path": "django\\django\\contrib\\admin\\utils.py", - "ast_data": "FunctionDef name:flatten arguments arg:fields Assign For If Call call:isinstance Return return:yes" - }, - { - "library": "django", - "name": "no_translations", - "source_code": "def no_translations(handle_func): def wrapper(*args, **kwargs): from django.utils import translation saved_locale = translation.get_language() translation.deactivate_all() try: res = handle_func(*args, **kwargs) finally: if saved_locale is not None: translation.activate(saved_locale) return res return wrapper", - "docstring": "Decorator that forces a command to run with translations deactivated.", - "type": "function", - "file_path": "django\\django\\core\\management\\base.py", - "ast_data": "FunctionDef name:no_translations arguments arg:handle_func FunctionDef name:wrapper arguments vararg:args kwarg:kwargs Assign Call call:get_language Try Assign Call call:handle_func If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "estimate_smallest_singular_value", - "source_code": "def estimate_smallest_singular_value(U): U = np.atleast_2d(U) m, n = U.shape if m ! 
= n: raise ValueError('A square triangular matrix should be provided.') p = np.zeros(n) w = np.empty(n) for k in range(n): wp = (1 - p[k]) / U.T[k, k] wm = (-1 - p[k]) / U.T[k, k] pp = p[k + 1:] + U.T[k + 1:, k] * wp pm = p[k + 1:] + U.T[k + 1:, k] * wm if abs(wp) + norm(pp, 1) > = abs(wm) + norm(pm, 1): w[k] = wp p[k + 1:] = pp else: w[k] = wm p[k + 1:] = pm v = solve_triangular(U, w) v_norm = norm(v) w_norm = norm(w) s_min = w_norm / v_norm z_min = v / v_norm return (s_min, z_min)", - "docstring": "Given upper triangular matrix ``. The estimation will be better more ill-conditioned is the matrix. References ---------- .. [1] Cline, A. K., Moler, C. B., Stewart, G. W., Wilkinson, J. H. An estimate for the condition number of a matrix. 1979. SIAM Journal on Numerical Analysis, 16(2), 368-375.", - "type": "function", - "file_path": "scipy\\scipy\\optimize\\_trustregion_exact.py", - "ast_data": "FunctionDef name:estimate_smallest_singular_value arguments arg:U Assign Call call:atleast_2d Assign If Compare op:NotEq Raise raises:ValueError('A square triangular matrix should be provided.') Assign Call call:zeros Assign Call call:empty For Call call:range Assign Assign Assign Assign If Compare op:GtE Assign Assign Assign Assign Assign Call call:solve_triangular Assign Call call:norm Assign Call call:norm Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_chief_queue_runner", - "source_code": "def get_chief_queue_runner(self): if self._gradients_applied is False: raise ValueError('Should be called after apply_gradients().') return self._chief_queue_runner", - "docstring": "Returns the QueueRunner for the chief to execute. This includes the operations to synchronize replicas: aggregate gradients, apply to variables, increment global step, insert tokens to token queue. Note that this can only be called after calling apply_gradients() which actually generates this queuerunner. Returns: A for chief to execute. Raises: ValueError: If this is called before apply_gradients().", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\sync_replicas_optimizer.py", - "ast_data": "FunctionDef name:get_chief_queue_runner arguments arg:self If Compare op:Is Raise raises:ValueError('Should be called after apply_gradients().') Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_vertical", - "source_code": "def set_vertical(self, v): self._vertical = v", - "docstring": "Parameters ---------- v : list of :mod: sizes for vertical division", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", - "ast_data": "FunctionDef name:set_vertical arguments arg:self arg:v Assign" - }, - { - "library": "pytorch", - "name": "named_modules", - "source_code": "def named_modules(self, memo: Optional[set['Module']] = None, prefix: str = '', remove_duplicate: bool = True): if memo is None: memo = set() if self not in memo: if remove_duplicate: memo.add(self) yield (prefix, self) for name, module in self._modules.items(): if module is None: continue submodule_prefix = prefix + ('.' if prefix else '') + name yield from module.named_modules(memo, submodule_prefix, remove_duplicate)", - "docstring": "Return an iterator over all modules in the network, yielding both the name of the module as well as the module itself. 
Args: memo: a memo to store the set of modules already added to the result prefix: a prefix that will be added to the name of the module remove_duplicate: whether to remove the duplicated module instances in the result or not Yields: (str, Module): Tuple of name and module Note: Duplicate modules are returned only once. In the following example, `` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.named_modules()): ... print(idx, '->', m) 0 -> ('', Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) )) 1 -> ('0', Linear(in_features=2, out_features=2, bias=True))", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:named_modules arguments arg:self arg:memo type:Optional[set['Module']] arg:prefix type:str arg:remove_duplicate type:bool If Compare op:Is Assign Call call:set If Compare op:NotIn If For Call call:items If Compare op:Is Assign" - }, - { - "library": "django", - "name": "mask_hash", - "source_code": "def mask_hash(hash, show = 6, char = '*'): masked = hash[: show] masked + = char * len(hash[show:]) return masked", - "docstring": "Return the given hash, with only the first `` for security reasons.", - "type": "function", - "file_path": "django\\django\\contrib\\auth\\hashers.py", - "ast_data": "FunctionDef name:mask_hash arguments arg:hash arg:show arg:char Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "isbuiltin", - "source_code": "def isbuiltin(object): return _inspect.isbuiltin(tf_decorator.unwrap(object)[1])", - "docstring": "TFDecorator-aware replacement for inspect.isbuiltin.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", - "ast_data": "FunctionDef name:isbuiltin arguments arg:object Return return:yes" - }, - { - "library": "tensorflow", - "name": "load", - "source_code": "def load(self) -> RepresentativeDatasetMapping: raise NotImplementedError('Method \"load\" is not implemented.')", - "docstring": "Loads the representative datasets. Returns: representative dataset mapping: A loaded signature def key -> representative mapping.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\compiler\\mlir\\quantization\\tensorflow\\python\\representative_dataset.py", - "ast_data": "FunctionDef name:load arguments arg:self Raise raises:NotImplementedError('Method \"load\" is not implemented.')" - }, - { - "library": "numpy", - "name": "real", - "source_code": "@property def real(self): result = self._data.real.view(type(self)) result.__setmask__(self._mask) return result", - "docstring": "The real part of the masked array. This property is a view on the real part of this . 
See Also -------- imag Examples -------- >>> import numpy as np >>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False]) >>> x.real masked_array(data=[1.0, --, 3.45], mask=[False, True, False], fill_value=1e+20)", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:real arguments arg:self Assign Call call:view Return return:yes" - }, - { - "library": "tensorflow", - "name": "delete", - "source_code": "def delete(self, dims: List[int]) -> 'Layout': if not isinstance(dims, list): dims = [dims] new_specs = [spec for i, spec in enumerate(self.sharding_specs) if i not in dims] return Layout(new_specs, self.mesh)", - "docstring": "Returns the layout with the give dimensions deleted.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\layout.py", - "ast_data": "FunctionDef name:delete arguments arg:self arg:dims type:List[int] If Assign Assign Return return:yes" - }, - { - "library": "feincms", - "name": "admin_translationinline", - "source_code": "def admin_translationinline(model, inline_class = admin.StackedInline, **kwargs): kwargs['extra'] = 1 kwargs['max_num'] = len(settings.LANGUAGES) kwargs['model'] = model return type(str(model.__class__.__name__ + 'Inline'), (inline_class,), kwargs)", - "docstring": "Returns a new inline type suitable for the Django administration:: from django.contrib import admin from myapp.models import News, NewsTranslation admin.site.register(News, inlines=[ admin_translationinline(NewsTranslation), ], )", - "type": "function", - "file_path": "feincms\\feincms\\translations.py", - "ast_data": "FunctionDef name:admin_translationinline arguments arg:model arg:inline_class kwarg:kwargs Assign Assign Call call:len Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "experimental_logical_device", - "source_code": "def experimental_logical_device(self, logical_device_id): return self.strategy.extended.experimental_logical_device(logical_device_id)", - "docstring": "Places variables and ops on the specified logical device.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\tpu_strategy.py", - "ast_data": "FunctionDef name:experimental_logical_device arguments arg:self arg:logical_device_id Return return:yes" - }, - { - "library": "pytorch", - "name": "purge_old_log_files", - "source_code": "def purge_old_log_files() -> None: for name, table in REGISTERED_METRIC_TABLES.items(): if name in enabled_metric_tables(): filename = table.output_filename() if os.path.exists(filename): os.unlink(filename) table.write_header()", - "docstring": "Purge the old log file at the beginning when the benchmark script runs. 
Should do it in the parent process rather than the child processes running each individual model.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\metrics.py", - "ast_data": "FunctionDef name:purge_old_log_files arguments For Call call:items If Compare op:In Assign Call call:output_filename If Call call:exists" - }, - { - "library": "tensorflow", - "name": "transform", - "source_code": "def transform(node, ctx, default_to_null_return = True): node = qual_names.resolve(node) node = activity.resolve(node, ctx, None) node = ConditionalReturnRewriter(ctx).visit(node) node = qual_names.resolve(node) node = activity.resolve(node, ctx, None) transformer = ReturnStatementsTransformer(ctx, allow_missing_return = default_to_null_return) node = transformer.visit(node) return node", - "docstring": "Ensure a function has only a single return, at the end.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\converters\\return_statements.py", - "ast_data": "FunctionDef name:transform arguments arg:node arg:ctx arg:default_to_null_return Assign Call call:resolve Assign Call call:resolve Assign Call call:visit Assign Call call:resolve Assign Call call:resolve Assign Call call:ReturnStatementsTransformer Assign Call call:visit Return return:yes" - }, - { - "library": "scipy", - "name": "rvs", - "source_code": "def rvs(self, dim, size = 1, random_state = None): random_state = self._get_random_state(random_state) q = ortho_group.rvs(dim, size, random_state) dets = np.linalg.det(q) if dim: q[..., 0, :] / = dets[..., np.newaxis] return q", - "docstring": "Draw random samples from SO(N). Parameters ---------- dim : integer Dimension of rotation space (N). size : integer, optional Number of samples to draw (default 1). Returns ------- rvs : ndarray or scalar Random size N-dimensional matrices, dimension (size, dim, dim)", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:rvs arguments arg:self arg:dim arg:size arg:random_state Assign Call call:_get_random_state Assign Call call:rvs Assign Call call:det If Return return:yes" - }, - { - "library": "tensorflow", - "name": "input_shape", - "source_code": "@property @doc_controls.do_not_doc_inheritable def input_shape(self): if not self._inbound_nodes: raise AttributeError('The layer has never been called and thus has no defined input shape.') all_input_shapes = set([str(node.input_shapes) for node in self._inbound_nodes]) if len(all_input_shapes) = = 1: return self._inbound_nodes[0].input_shapes else: raise AttributeError('The layer \"' + str(self.name) + ' has multiple inbound nodes, with different input shapes. Hence the notion of \"input shape\" is ill-defined for the layer. Use `get_input_shape_at(node_index)` instead.')", - "docstring": "Retrieves the input shape(s) of a layer. Only applicable if the layer has exactly one input, i.e. if it is connected to one incoming layer, or if all inputs have the same shape. Returns: Input shape, as an integer shape tuple (or list of shape tuples, one tuple per input tensor). Raises: AttributeError: if the layer has no defined input_shape. 
RuntimeError: if called in Eager mode.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:input_shape arguments arg:self If Raise raises:AttributeError('The layer has never been called and thus has no defined input shape.') Assign Call call:set If Compare op:Eq Return return:yes Raise raises:AttributeError('The layer \"' + str(self.name) + ' has multiple inbound nodes, with different input shapes. Hence the notion of \"input shape\" is ill-defined for the layer. Use `get_input_shape_at(node_index)` instead.')" - }, - { - "library": "pytorch", - "name": "register", - "source_code": "def register(self, *types, **kwargs): def _df(func): self.add(types, func, **kwargs) return func return _df", - "docstring": "register dispatcher with new implementation >>> # xdoctest: +SKIP >>> f = Dispatcher(\"f\") >>> @f.register(int) ... def inc(x): ... return x + 1 >>> @f.register(float) ... def dec(x): ... return x - 1 >>> @f.register(list) ... @f.register(tuple) ... def reverse(x): ... return x[::-1] >>> f(1) 2 >>> f(1.0) 0.0 >>> f([1, 2, 3]) [3, 2, 1]", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\multipledispatch\\dispatcher.py", - "ast_data": "FunctionDef name:register arguments arg:self vararg:types kwarg:kwargs FunctionDef name:_df arguments arg:func Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "benchmark_simple_fn", - "source_code": "def benchmark_simple_fn(args, config, module_config, module_type, result): print(f'Benchmarking {module_type.__name__}') f_name = module_config.pt_fn.__name__ + ': Num Operands = ' + str(module_config.num_params) graph_mode_str = 'Graph mode' + ': ' + str(module_config.graph_mode) result_key = ', '.join((f_name, graph_mode_str)) module = WrapperModule(module_type, module_config, args.debug, args.save) latency_per_iter_ms = benchmark_module(config, module, args.use_throughput_benchmark) result[result_key] = latency_per_iter_ms", - "docstring": "Benchmarks a PyTorch traceable function specified in the config. Instantiates a wrapper object that wraps the object of module_type and runs the forward method using benchmark_module. Args: config: contains number of warmup and benchmark iterations. module_config: module_config which contains op, number of parameters that op takes and whether graph mode is enabled or not. module_type: Type of the module to be wrapped. e.g. SimpleAddModule for add op. result: dictionary instance to be populated with the benchmark result (latency per iter).", - "type": "function", - "file_path": "pytorch\\benchmarks\\framework_overhead_benchmark\\framework_overhead_benchmark.py", - "ast_data": "FunctionDef name:benchmark_simple_fn arguments arg:args arg:config arg:module_config arg:module_type arg:result Assign Assign Assign Call call:join Assign Call call:WrapperModule Assign Call call:benchmark_module Assign" - }, - { - "library": "pytorch", - "name": "register_prehook", - "source_code": "@abc.abstractmethod def register_prehook(self, fn: Callable[..., Any]) -> RemovableHandle: raise NotImplementedError", - "docstring": "Register a backward pre-hook. The hook will be called every time a gradient with respect to the Node is computed. The hook should have the following signature:: hook(grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of :attr:. 
This function returns a handle with a method `backward-hooks-execution` for more information on how when this hook is executed, and how its execution is ordered relative to other hooks. Example:: >>> a = torch.tensor([0., 0., 0.], requires_grad=True) >>> b = a.clone() >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node) >>> handle = b.grad_fn.register_prehook(lambda gI: (gI[0] * 2,)) >>> b.sum().backward(retain_graph=True) >>> print(a.grad) tensor([2., 2., 2.]) >>> handle.remove() >>> a.grad = None >>> b.sum().backward(retain_graph=True) >>> print(a.grad) tensor([1., 1., 1.])", - "type": "method", - "file_path": "pytorch\\torch\\autograd\\graph.py", - "ast_data": "FunctionDef name:register_prehook arguments arg:self arg:fn type:Callable[..., Any] Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, sess): self._sess = sess self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)", - "docstring": "Creates a . Args: sess: A or object. The wrapped session.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:sess Assign Assign Call call:isinstance" - }, - { - "library": "pytorch", - "name": "patch", - "source_code": "def patch(self) -> _VirtualizedSerializerContextManager: return _VirtualizedSerializerContextManager(self)", - "docstring": "Returns a context manager which patches the saved values into the current environment. While patched, any value not listed above will be poisoned so that reads will raise an error.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\compile_fx_ext.py", - "ast_data": "FunctionDef name:patch arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "rdist_gen", - "source_code": "class rdist_gen(rv_continuous): def _shape_info(self): return [_ShapeInfo('c', False, (0, np.inf), (False, False))] def _pdf(self, x, c): return np.exp(self._logpdf(x, c)) def _logpdf(self, x, c): return -np.log(2) + beta._logpdf((x + 1) / 2, c / 2, c / 2) def _cdf(self, x, c): return beta._cdf((x + 1) / 2, c / 2, c / 2) def _sf(self, x, c): return beta._sf((x + 1) / 2, c / 2, c / 2) def _ppf(self, q, c): return 2 * beta._ppf(q, c / 2, c / 2) - 1 def _rvs(self, c, size = None, random_state = None): return 2 * random_state.beta(c / 2, c / 2, size) - 1 def _munp(self, n, c): numerator = (1 - n % 2) * sc.beta((n + 1.0) / 2, c / 2.0) return numerator / sc.beta(1.0 / 2, c / 2.0)", - "docstring": "An R-distributed (symmetric beta) continuous random variable. %(before_notes)s Notes ----- The probability density function for is: .. math:: f(x, c) = \\frac{(1-x^2)^{c/2-1}}{B(1/2, c/2)} for :math:, :math:. is also called the symmetric beta distribution: if B has a distribution with parameters (c/2, c/2), then X = 2*B - 1 follows a R-distribution with parameter c. 
takes `csemicircular` c = 4: Epanechnikov (parabolic) c = 6: quartic (biweight) c = 8: triweight %(after_notes)s %(example)s", - "type": "class", - "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", - "ast_data": "ClassDef name:rdist_gen FunctionDef name:_shape_info arguments arg:self Return return:yes FunctionDef name:_pdf arguments arg:self arg:x arg:c Return return:yes FunctionDef name:_logpdf arguments arg:self arg:x arg:c Return return:yes FunctionDef name:_cdf arguments arg:self arg:x arg:c Return return:yes FunctionDef name:_sf arguments arg:self arg:x arg:c Return return:yes FunctionDef name:_ppf arguments arg:self arg:q arg:c Return return:yes FunctionDef name:_rvs arguments arg:self arg:c arg:size arg:random_state Return return:yes FunctionDef name:_munp arguments arg:self arg:n arg:c Assign Return return:yes" - }, - { - "library": "scipy", - "name": "power", - "source_code": "def power(self, n, dtype = None): if not isscalarlike(n): raise NotImplementedError('input is not scalar') if not n: raise NotImplementedError('zero power is not supported as it would densify the matrix.\\nUse `np.ones(A.shape, dtype = A.dtype)` for this case.') data = self._deduped_data() if dtype is not None: data = data.astype(dtype, copy = False) return self._with_data(data ** n)", - "docstring": "This function performs element-wise power. Parameters ---------- n : scalar n is a non-zero scalar (nonzero avoids dense ones creation) If zero power is desired, special case it to use dtype : If dtype is not specified, the current dtype will be preserved. Raises ------ NotImplementedError : if n is a zero scalar If zero power is desired, special case it to use ``", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_data.py", - "ast_data": "FunctionDef name:power arguments arg:self arg:n arg:dtype If Raise raises:NotImplementedError('input is not scalar') If Raise raises:NotImplementedError('zero power is not supported as it would densify the matrix.\\nUse `np.ones(A.shape, dtype=A.dtype)` for this case.') Assign Call call:_deduped_data If Compare op:IsNot Assign Call call:astype Return return:yes" - }, - { - "library": "mongo", - "name": "collation", - "source_code": "def collation(self, collation: Optional[_CollationIn]) -> Cursor[_DocumentType]: self._check_okay_to_chain() self._collation = validate_collation_or_none(collation) return self", - "docstring": "Adds a :class: to this query. Raises :exc: if is not an instance of :class: or a `~pymongo.errors.InvalidOperationCursor~pymongo.collation.Collation`.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\cursor.py", - "ast_data": "FunctionDef name:collation arguments arg:self arg:collation type:Optional[_CollationIn] Assign Call call:validate_collation_or_none Return return:yes" - }, - { - "library": "django", - "name": "get_bytes_from_wsgi", - "source_code": "def get_bytes_from_wsgi(environ, key, default): value = environ.get(key, default) return value.encode('iso-8859-1')", - "docstring": "Get a value from the WSGI environ dictionary as bytes. 
key and default should be strings.", - "type": "function", - "file_path": "django\\django\\core\\handlers\\wsgi.py", - "ast_data": "FunctionDef name:get_bytes_from_wsgi arguments arg:environ arg:key arg:default Assign Call call:get Return return:yes" - }, - { - "library": "scrapy", - "name": "iter_all", - "source_code": "def iter_all(class_name: str) -> Iterable[Any]: for cls, wdict in live_refs.items(): if cls.__name__ = = class_name: return wdict.keys() return []", - "docstring": "Iterate over all objects of the same class by its class name", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\trackref.py", - "ast_data": "FunctionDef name:iter_all arguments arg:class_name type:str For Call call:items If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "get_spatial_gradient_kernel3d", - "source_code": "def get_spatial_gradient_kernel3d(mode: str, order: int, device: Optional[Device] = None, dtype: Optional[Dtype] = None) -> Tensor: KORNIA_CHECK(mode.lower() in {'sobel', 'diff'}, f'Mode should be `sobel` or `diff`. Got {mode}') KORNIA_CHECK(order in {1, 2}, f'Order should be 1 or 2. Got {order}') if mode = = 'diff' and order = = 1: kernel = get_diff_kernel3d(device = device, dtype = dtype) elif mode = = 'diff' and order = = 2: kernel = get_diff_kernel3d_2nd_order(device = device, dtype = dtype) else: raise NotImplementedError(f'Not implemented 3d gradient kernel for order {order} on mode {mode}') return kernel", - "docstring": "Return kernel for 1st or 2nd order scale pyramid gradients. Uses one of the following operators: sobel, diff.", - "type": "function", - "file_path": "kornia\\kornia\\filters\\kernels.py", - "ast_data": "FunctionDef name:get_spatial_gradient_kernel3d arguments arg:mode type:str arg:order type:int arg:device type:Optional[Device] arg:dtype type:Optional[Dtype] If BoolOp Compare op:Eq Compare op:Eq Assign Call call:get_diff_kernel3d If BoolOp Compare op:Eq Compare op:Eq Assign Call call:get_diff_kernel3d_2nd_order Raise raises:NotImplementedError(f'Not implemented 3d gradient kernel for order {order} on mode {mode}') Return return:yes" - }, - { - "library": "matplotlib", - "name": "out_of_date", - "source_code": "def out_of_date(original, derived, includes = None): if not os.path.exists(derived): return True if includes is None: includes = [] files_to_check = [original, *includes] def out_of_date_one(original, derived_mtime): return os.path.exists(original) and derived_mtime < os.stat(original).st_mtime derived_mtime = os.stat(derived).st_mtime return any((out_of_date_one(f, derived_mtime) for f in files_to_check))", - "docstring": "Return whether *derived* is out-of-date relative to *original* or any of the RST files included in it using the RST include directive (*includes*). 
*derived* and *original* are full paths, and *includes* is optionally a list of full paths which may have been included in the *original*.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\sphinxext\\plot_directive.py", - "ast_data": "FunctionDef name:out_of_date arguments arg:original arg:derived arg:includes If Return return:yes If Compare op:Is Assign Assign FunctionDef name:out_of_date_one arguments arg:original arg:derived_mtime Return return:yes Assign Return return:yes" - }, - { - "library": "pygame", - "name": "get_arraytypes", - "source_code": "def get_arraytypes(): warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module')) return ('numpy',)", - "docstring": "pygame.surfarray.get_arraytypes(): return tuple DEPRECATED - only numpy arrays are now supported.", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:get_arraytypes arguments Return return:yes" - }, - { - "library": "pandas", - "name": "value_counts", - "source_code": "def value_counts(self, dropna: bool = True) -> Series: if self.ndim ! = 1: raise NotImplementedError from pandas import Index, Series if dropna: values = self[~self.isna()]._ndarray else: values = self._ndarray result = value_counts(values, sort = False, dropna = dropna) index_arr = self._from_backing_data(np.asarray(result.index._data)) index = Index(index_arr, name = result.index.name) return Series(result._values, index = index, name = result.name, copy = False)", - "docstring": "Return a Series containing counts of unique values. Parameters ---------- dropna : bool, default True Don't include counts of NA values. Returns ------- Series", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\_mixins.py", - "ast_data": "FunctionDef name:value_counts arguments arg:self arg:dropna type:bool If Compare op:NotEq Raise raises:NotImplementedError If Assign Assign Assign Call call:value_counts Assign Call call:_from_backing_data Assign Call call:Index Return return:yes" - }, - { - "library": "pytorch", - "name": "AnonymousAxis", - "source_code": "class AnonymousAxis: def __init__(self, value: str) -> None: self.value = int(value) if self.value < 1: raise ValueError(f'Anonymous axis should have positive length, not {self.value}') def __repr__(self) -> str: return f'{self.value}-axis'", - "docstring": "Used by to represent an axis with a size (> 1), but no associated identifier. Note: Different instances of this class are not equal to each other, even if they have the same value.", - "type": "class", - "file_path": "pytorch\\functorch\\einops\\_parsing.py", - "ast_data": "ClassDef name:AnonymousAxis FunctionDef name:__init__ arguments arg:self arg:value type:str Assign Call call:int If Compare op:Lt Raise raises:ValueError(f'Anonymous axis should have positive length, not {self.value}') FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "build_results", - "source_code": "def build_results(self, values): raise NotImplementedError('build_results must be implemented by subclasses')", - "docstring": "Build results that match the original shape of the fetch. Args: values: List of values returned by run(). The values correspond exactly to the list tensors or ops returned by unique_fetches(). Returns: A struct of the same shape as the original fetch object handled by this fetch mapper. 
In the returned struct, the original fetches are replaced by their fetched values.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", - "ast_data": "FunctionDef name:build_results arguments arg:self arg:values Raise raises:NotImplementedError('build_results must be implemented by subclasses')" - }, - { - "library": "pandas", - "name": "get_format_datetime64", - "source_code": "def get_format_datetime64(is_dates_only: bool, nat_rep: str = 'NaT', date_format: str | None = None) -> Callable: if is_dates_only: return lambda x: _format_datetime64_dateonly(x, nat_rep = nat_rep, date_format = date_format) else: return lambda x: _format_datetime64(x, nat_rep = nat_rep)", - "docstring": "Return a formatter callable taking a datetime64 as input and providing a string as output", - "type": "function", - "file_path": "pandas\\pandas\\io\\formats\\format.py", - "ast_data": "FunctionDef name:get_format_datetime64 arguments arg:is_dates_only type:bool arg:nat_rep type:str arg:date_format type:str | None If Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "inverse", - "source_code": "def inverse(self, value): raise ValueError('BoundaryNorm is not invertible')", - "docstring": "Raises ------ ValueError BoundaryNorm is not invertible, so calling this method will always raise an error", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:inverse arguments arg:self arg:value Raise raises:ValueError('BoundaryNorm is not invertible')" - }, - { - "library": "django", - "name": "filesizeformat", - "source_code": "@register.filter(is_safe = True) def filesizeformat(bytes_): try: bytes_ = int(bytes_) except (TypeError, ValueError, UnicodeDecodeError): value = ngettext('%(size)d byte', '%(size)d bytes', 0) % {'size': 0} return avoid_wrapping(value) def filesize_number_format(value): return formats.number_format(round(value, 1), 1) KB = 1 << 10 MB = 1 << 20 GB = 1 << 30 TB = 1 << 40 PB = 1 << 50 negative = bytes_ < 0 if negative: bytes_ = -bytes_ if bytes_ < KB: value = ngettext('%(size)d byte', '%(size)d bytes', bytes_) % {'size': bytes_} elif bytes_ < MB: value = gettext('%s KB') % filesize_number_format(bytes_ / KB) elif bytes_ < GB: value = gettext('%s MB') % filesize_number_format(bytes_ / MB) elif bytes_ < TB: value = gettext('%s GB') % filesize_number_format(bytes_ / GB) elif bytes_ < PB: value = gettext('%s TB') % filesize_number_format(bytes_ / TB) else: value = gettext('%s PB') % filesize_number_format(bytes_ / PB) if negative: value = '-%s' % value return avoid_wrapping(value)", - "docstring": "Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB, 102 bytes, etc.).", - "type": "function", - "file_path": "django\\django\\template\\defaultfilters.py", - "ast_data": "FunctionDef name:filesizeformat arguments arg:bytes_ Call call:filter Try Assign Call call:int ExceptHandler Assign Return return:yes FunctionDef name:filesize_number_format arguments arg:value Return return:yes Assign Assign Assign Assign Assign Assign Compare op:Lt If Assign If Compare op:Lt Assign If Compare op:Lt Assign If Compare op:Lt Assign If Compare op:Lt Assign If Compare op:Lt Assign Assign If Assign Return return:yes" - }, - { - "library": "kornia", - "name": "cy", - "source_code": "@property def cy(self) -> Tensor: return self.intrinsics[..., 1, 2]", - "docstring": "Return the y-coordinate of the principal point. 
Returns: tensor of shape :math:.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", - "ast_data": "FunctionDef name:cy arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "inverse_transform", - "source_code": "def inverse_transform(self, X): return self._inverse_transform(X, self.dictionary)", - "docstring": "Transform data back to its original space. Parameters ---------- X : array-like of shape (n_samples, n_components) Data to be transformed back. Must have the same number of components as the data used to train the model. Returns ------- X_original : ndarray of shape (n_samples, n_features) Transformed data.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_dict_learning.py", - "ast_data": "FunctionDef name:inverse_transform arguments arg:self arg:X Return return:yes" - }, - { - "library": "tensorflow", - "name": "assertProtoEqual", - "source_code": "def assertProtoEqual(self, a, b, check_initialized = True, normalize_numbers = False, msg = None, relative_tolerance = None): pool = descriptor_pool.Default() if isinstance(a, str): a = text_format.Parse(a, b.__class__(), descriptor_pool = pool) for pb in (a, b): if check_initialized: errors = pb.FindInitializationErrors() if errors: self.fail('Initialization errors: %s\\n%s' % (errors, pb)) if normalize_numbers: NormalizeNumberFields(pb) if relative_tolerance is not None: checkFloatEqAndReplace(self, expected = b, actual = a, relative_tolerance = relative_tolerance) a_str = text_format.MessageToString(a, descriptor_pool = pool) b_str = text_format.MessageToString(b, descriptor_pool = pool) if len(a_str) < 2 ** 16 and len(b_str) < 2 ** 16: self.assertMultiLineEqual(a_str, b_str, msg = msg) else: diff = ''.join(difflib.unified_diff(a_str.splitlines(True), b_str.splitlines(True))) if diff: self.fail('%s: \\n%s' % (msg, diff))", - "docstring": "Fails with a useful error if a and b aren't equal. Comparison of repeated fields matches the semantics of unittest.TestCase.assertEqual(), ie order and extra duplicates fields matter. Args: self: googletest.TestCase a: proto2 PB instance, or text string representing one. b: proto2 PB instance -- message.Message or subclass thereof. check_initialized: boolean, whether to fail if either a or b isn't initialized. normalize_numbers: boolean, whether to normalize types and precision of numbers before comparison. msg: if specified, is used as the error message on failure. relative_tolerance: float, relative tolerance. 
If this is not provided, then all floats are compared using string comparison otherwise, floating point comparisons are done using the relative tolerance provided.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\protobuf\\compare.py", - "ast_data": "FunctionDef name:assertProtoEqual arguments arg:self arg:a arg:b arg:check_initialized arg:normalize_numbers arg:msg arg:relative_tolerance Assign Call call:Default If Call call:isinstance Assign Call call:Parse For If Assign Call call:FindInitializationErrors If If If Compare op:IsNot Assign Call call:MessageToString Assign Call call:MessageToString If BoolOp Compare op:Lt Compare op:Lt Assign Call call:join If" - }, - { - "library": "pytorch", - "name": "softmax", - "source_code": "def softmax(input: Tensor, dim: Optional[int] = None, _stacklevel: int = 3, dtype: Optional[DType] = None) -> Tensor: if has_torch_function_unary(input): return handle_torch_function(softmax, (input,), input, dim = dim, _stacklevel = _stacklevel, dtype = dtype) if dim is None: dim = _get_softmax_dim('softmax', input.dim(), _stacklevel) if dtype is None: ret = input.softmax(dim) else: ret = input.softmax(dim, dtype = dtype) return ret", - "docstring": "Apply a softmax function. Softmax is defined as: :math: It is applied to all slices along dim, and will re-scale them so that the elements lie in the range and sum to 1. See :class: for more details. Args: input (Tensor): input dim (int): A dimension along which softmax will be computed. dtype (:class:, optional): the desired data type of returned tensor. If specified, the input tensor is casted to :attr: before the operation is performed. This is useful for preventing data type overflows. Default: None. .. note:: This function doesn't work directly with NLLLoss, which expects the Log to be computed between the Softmax and itself. 
Use log_softmax instead (it's faster and has better numerical properties).", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:softmax arguments arg:input type:Tensor arg:dim type:Optional[int] arg:_stacklevel type:int arg:dtype type:Optional[DType] If Call call:has_torch_function_unary Return return:yes If Compare op:Is Assign Call call:_get_softmax_dim If Compare op:Is Assign Call call:softmax Assign Call call:softmax Return return:yes" - }, - { - "library": "mongo", - "name": "connection_id", - "source_code": "@property def connection_id(self) -> _Address: return self.__connection_id", - "docstring": "The address (host, port) of the server this heartbeat was sent to.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:connection_id arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "start_index", - "source_code": "def start_index(self): if self.paginator.count = = 0: return 0 return self.paginator.per_page * (self.number - 1) + 1", - "docstring": "Return the 1-based index of the first object on this page, relative to total objects in the paginator.", - "type": "method", - "file_path": "django\\django\\core\\paginator.py", - "ast_data": "FunctionDef name:start_index arguments arg:self If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "variable_shape", - "source_code": "@property def variable_shape(self): if isinstance(self.categorical_column, fc_types.FeatureColumn): return tensor_shape.TensorShape([1, self.categorical_column.num_buckets]) else: return tensor_shape.TensorShape([1, self.categorical_column._num_buckets])", - "docstring": "Returns a representing the shape of the dense .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", - "ast_data": "FunctionDef name:variable_shape arguments arg:self If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "force_lazy_device", - "source_code": "def force_lazy_device(model: fx.GraphModule): def tolazydevice(dev): if isinstance(dev, torch.device): return torch.device('lazy', index = dev.index) return dev def hasDeviceArg(args, kwargs): return any((isinstance(arg, torch.device) for arg in itertools.chain(args, kwargs.values()))) for nd in model.graph.nodes: nd.args = tuple((tolazydevice(arg) for arg in nd.args)) nd.kwargs = {k: tolazydevice(v) for k, v in nd.kwargs.items()} if nd.target in tensor_factory_functions and (not hasDeviceArg(nd.args, nd.kwargs)): kwargs = dict(nd.kwargs) kwargs['device'] = torch.device('lazy') nd.kwargs = kwargs model.recompile()", - "docstring": "Factory methods in a Fx graph may create tensors for a specific eager devices. If we take no actions, those eager tensors will be mixed with lazy tensors and cause crash. 
This method overwrite those eager device to lazy device.", - "type": "function", - "file_path": "pytorch\\torch\\_lazy\\extract_compiled_graph.py", - "ast_data": "FunctionDef name:force_lazy_device arguments arg:model type:fx.GraphModule FunctionDef name:tolazydevice arguments arg:dev If Call call:isinstance Return return:yes Return return:yes FunctionDef name:hasDeviceArg arguments arg:args arg:kwargs Return return:yes For Assign Call call:tuple Assign If BoolOp Compare op:In Assign Call call:dict Assign Call call:device Assign" - }, - { - "library": "pytorch", - "name": "PlaceholderInfo", - "source_code": "@dataclasses.dataclass(frozen = True) class PlaceholderInfo: name: str stack_trace: Optional[str] users: list[PlaceholderInfo] mutating_use_stack_trace: Optional[str]", - "docstring": "A serializable version of torch.fx.Node that contains information pertinent to placeholder stack traces. We use these in logging and error messages related to cudagraphs, and will cache these results.", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\cudagraph_utils.py", - "ast_data": "ClassDef name:PlaceholderInfo Call call:dataclass" - }, - { - "library": "pytorch", - "name": "determine_grid", - "source_code": "def determine_grid(grid: TritonGrid, example_grid: Optional[TritonGrid] = None): if wrapper is None or callable(grid): return (grid, grid) sympy_grid = tuple((_convert_to_sympy_expr(g) for g in grid)) if not example_grid: example_grid = sympy_grid return (wrapper.codegen_python_shape_tuple(sympy_grid), wrapper.codegen_python_shape_tuple(tuple((wrapper.generate_example_arg_value(g, type(g)) for g in example_grid))) if config.triton.autotune_at_compile_time else None)", - "docstring": "This function return a tuple of two values: the first one is for the real grid which is used in the generated code; the second one is an example grid with concreate values which is used in the autotune block to run the generated kernels at compile time.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\codegen\\wrapper.py", - "ast_data": "FunctionDef name:determine_grid arguments arg:grid type:TritonGrid arg:example_grid type:Optional[TritonGrid] If BoolOp Compare op:Is Call call:callable Return return:yes Assign Call call:tuple If Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_flat_tensor_types", - "source_code": "def get_flat_tensor_types(element_spec): return [spec.dtype for spec in get_flat_tensor_specs(element_spec)]", - "docstring": "Returns a list s for the element tensor representation. Args: element_spec: A nested structure of objects representing to element type specification. Returns: A list s for the element tensor representation.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\util\\structure.py", - "ast_data": "FunctionDef name:get_flat_tensor_types arguments arg:element_spec Return return:yes" - }, - { - "library": "sphinx", - "name": "create_translator", - "source_code": "def create_translator(self, *args: Any) -> nodes.NodeVisitor: return self.env._registry.create_translator(self, *args)", - "docstring": "Return an instance of translator. 
This method returns an instance of `` API.", - "type": "method", - "file_path": "sphinx\\sphinx\\builders\\__init__.py", - "ast_data": "FunctionDef name:create_translator arguments arg:self vararg:args Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_pad", - "source_code": "def set_pad(self, pad): self._pad = pad", - "docstring": "Set the internal pad in points. The actual pad will be the sum of the internal pad and the external pad (the latter is set automatically by the ). Parameters ---------- pad : float The internal pad in points.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axisartist\\axis_artist.py", - "ast_data": "FunctionDef name:set_pad arguments arg:self arg:pad Assign" - }, - { - "library": "pandas", - "name": "maybe_set_size", - "source_code": "def maybe_set_size(self, min_itemsize = None) -> None: if self.kind = = 'string': if isinstance(min_itemsize, dict): min_itemsize = min_itemsize.get(self.name) if min_itemsize is not None and self.typ.itemsize < min_itemsize: self.typ = _tables().StringCol(itemsize = min_itemsize, pos = self.pos)", - "docstring": "maybe set a string col itemsize: min_itemsize can be an integer or a dict with this columns name with an integer size", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:maybe_set_size arguments arg:self arg:min_itemsize If Compare op:Eq If Call call:isinstance Assign Call call:get If BoolOp Compare op:IsNot Compare op:Lt Assign Call call:StringCol" - }, - { - "library": "mongo", - "name": "advance_cluster_time", - "source_code": "def advance_cluster_time(self, cluster_time: Mapping[str, Any]) -> None: if not isinstance(cluster_time, _Mapping): raise TypeError(f'cluster_time must be a subclass of collections.Mapping, not {type(cluster_time)}') if not isinstance(cluster_time.get('clusterTime'), Timestamp): raise ValueError('Invalid cluster_time') self._advance_cluster_time(cluster_time)", - "docstring": "Update the cluster time for this session. 
:param cluster_time: The :data: from another instance.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\client_session.py", - "ast_data": "FunctionDef name:advance_cluster_time arguments arg:self arg:cluster_time type:Mapping[str, Any] If Raise raises:TypeError(f'cluster_time must be a subclass of collections.Mapping, not {type(cluster_time)}') If Raise raises:ValueError('Invalid cluster_time')" - }, - { - "library": "mongo", - "name": "publish_connection_ready", - "source_code": "def publish_connection_ready(self, address: _Address, connection_id: int, duration: float) -> None: event = ConnectionReadyEvent(address, connection_id, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_ready(event) except Exception: _handle_exception()", - "docstring": "Publish a :class: to all connection listeners.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:publish_connection_ready arguments arg:self arg:address type:_Address arg:connection_id type:int arg:duration type:float Assign Call call:ConnectionReadyEvent For Try ExceptHandler" - }, - { - "library": "numpy", - "name": "all", - "source_code": "def all(self, axis = None, out = None, keepdims = np._NoValue): kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims} mask = _check_mask_axis(self._mask, axis, **kwargs) if out is None: d = self.filled(True).all(axis = axis, **kwargs).view(type(self)) if d.ndim: d.__setmask__(mask) elif mask: return masked return d self.filled(True).all(axis = axis, out = out, **kwargs) if isinstance(out, MaskedArray): if out.ndim or mask: out.__setmask__(mask) return out", - "docstring": "Returns True if all elements evaluate to True. The output array is masked where all the values along the given axis are masked: if the output would have been a scalar and that all the values are masked, then the output is . Refer to for full documentation. See Also -------- numpy.ndarray.all : corresponding function for ndarrays numpy.all : equivalent function Examples -------- >>> import numpy as np >>> np.ma.array([1,2,3]).all() True >>> a = np.ma.array([1,2,3], mask=True) >>> (a.all() is np.ma.masked) True", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:all arguments arg:self arg:axis arg:out arg:keepdims Assign Assign Call call:_check_mask_axis If Compare op:Is Assign Call call:view If If Return return:yes Return return:yes If Call call:isinstance If BoolOp Return return:yes" - }, - { - "library": "flexx", - "name": "key_down", - "source_code": "@event.emitter def key_down(self, e): return self._create_key_event(e)", - "docstring": "Event emitted when a key is pressed down while this widget has focus. A key event has the following attributes: * key: the character corresponding to the key being pressed, or a key name like \"Escape\", \"Alt\", \"Enter\". * modifiers: list of strings \"Alt\", \"Shift\", \"Ctrl\", \"Meta\" for modifier keys pressed down at the time of the event. A browser may associate certain actions with certain key presses. If this browser action is unwanted, it can be disabled by overloading this emitter: .. 
code-block:: py @event.emitter def key_down(self, e): # Prevent browser's default reaction to function keys ev = super().key_press(e) if ev.key.startswith('F'): e.preventDefault() return ev", - "type": "method", - "file_path": "flexx\\flexx\\ui\\_widget.py", - "ast_data": "FunctionDef name:key_down arguments arg:self arg:e Return return:yes" - }, - { - "library": "scikit-learn", - "name": "default_device", - "source_code": "def default_device(self): return 'cpu'", - "docstring": "The default device used for new NumPy arrays. For NumPy, this always returns ``. See Also -------- __array_namespace_info__.capabilities, __array_namespace_info__.default_dtypes, __array_namespace_info__.dtypes, __array_namespace_info__.devices Returns ------- device : Device The default device used for new NumPy arrays. Examples -------- >>> info = np.__array_namespace_info__() >>> info.default_device() 'cpu'", - "type": "method", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\numpy\\_info.py", - "ast_data": "FunctionDef name:default_device arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "extent", - "source_code": "@property def extent(self): from .point import Point env = self.envelope if isinstance(env, Point): xmin, ymin = env.tuple xmax, ymax = (xmin, ymin) else: xmin, ymin = env[0][0] xmax, ymax = env[0][2] return (xmin, ymin, xmax, ymax)", - "docstring": "Return the extent of this geometry as a 4-tuple, consisting of (xmin, ymin, xmax, ymax).", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:extent arguments arg:self Assign If Call call:isinstance Assign Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "prepare_qat", - "source_code": "def prepare_qat(model, mapping = None, inplace = False): torch._C._log_api_usage_once('quantization_api.quantize.prepare_qat') assert model.training, 'prepare_qat only works on models in training mode' if mapping is None: mapping = get_default_qat_module_mappings() if not inplace: model = copy.deepcopy(model) propagate_qconfig_(model, qconfig_dict = None) convert(model, mapping = mapping, inplace = True, remove_qconfig = False) prepare(model, observer_non_leaf_module_list = set(mapping.values()), inplace = True) return model", - "docstring": "Prepares a copy of the model for quantization calibration or quantization-aware training and converts it to quantized version. Quantization configuration should be assigned preemptively to individual submodules in attribute. Args: model: input model to be modified in-place mapping: dictionary that maps float modules to quantized modules to be replaced. 
inplace: carry out model transformations in-place, the original module is mutated", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py", - "ast_data": "FunctionDef name:prepare_qat arguments arg:model arg:mapping arg:inplace If Compare op:Is Assign Call call:get_default_qat_module_mappings If Assign Call call:deepcopy Return return:yes" - }, - { - "library": "tensorflow", - "name": "skip", - "source_code": "def skip(self, delta): def update_fn(v): return self._skip_single_var(v, delta) if values_util.is_saving_non_distributed(): return update_fn(self.state) if self._distribution_strategy is not None: with distribute_lib.enter_or_assert_strategy(self._distribution_strategy): if distribute_lib.in_cross_replica_context(): values_util.mark_as_unsaveable() if distribute_lib.in_cross_replica_context() or 'CentralStorage' in type(self._distribution_strategy).__name__: return distribute_lib.get_strategy().extended.update(self.state, update_fn) return update_fn(self.state)", - "docstring": "Advance the counter of a counter-based RNG. Args: delta: the amount of advancement. The state of the RNG after will be the same as that after (or any other distribution). The actual increment added to the counter is an unspecified implementation detail. Returns: A of type .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", - "ast_data": "FunctionDef name:skip arguments arg:self arg:delta FunctionDef name:update_fn arguments arg:v Return return:yes If Call call:is_saving_non_distributed Return return:yes If Compare op:IsNot With If Call call:in_cross_replica_context If BoolOp Call call:in_cross_replica_context Compare op:In Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "num_slices_in_dimension", - "source_code": "def num_slices_in_dimension(self, axis): if axis < 0: return constant_op.constant(1, dtype = self.dim_size_dtype) elif self.is_ragged(axis): return math_ops.reduce_sum(self._partitioned_dim_sizes[axis]) else: return self.dimension_size(axis) * self.num_slices_in_dimension(axis - 1)", - "docstring": "Returns the total number of slices across the indicated dimension.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_tensor_shape.py", - "ast_data": "FunctionDef name:num_slices_in_dimension arguments arg:self arg:axis If Compare op:Lt Return return:yes If Call call:is_ragged Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "add", - "source_code": "def add(self, key, value, timeout = DEFAULT_TIMEOUT, version = None): raise NotImplementedError('subclasses of BaseCache must provide an add() method')", - "docstring": "Set a value in the cache if the key does not already exist. If timeout is given, use that timeout for the key; otherwise use the default cache timeout. 
Return True if the value was stored, False otherwise.", - "type": "method", - "file_path": "django\\django\\core\\cache\\backends\\base.py", - "ast_data": "FunctionDef name:add arguments arg:self arg:key arg:value arg:timeout arg:version Raise raises:NotImplementedError('subclasses of BaseCache must provide an add() method')" - }, - { - "library": "matplotlib", - "name": "translated", - "source_code": "def translated(self, tx, ty): return Bbox(self._points + (tx, ty))", - "docstring": "Construct a by translating this one by *tx* and *ty*.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:translated arguments arg:self arg:tx arg:ty Return return:yes" - }, - { - "library": "scikit-learn", - "name": "load_csv_data", - "source_code": "def load_csv_data(data_file_name, *, data_module = DATA_MODULE, descr_file_name = None, descr_module = DESCR_MODULE, encoding = 'utf-8'): data_path = resources.files(data_module) / data_file_name with data_path.open('r', encoding = 'utf-8') as csv_file: data_file = csv.reader(csv_file) temp = next(data_file) n_samples = int(temp[0]) n_features = int(temp[1]) target_names = np.array(temp[2:]) data = np.empty((n_samples, n_features)) target = np.empty((n_samples,), dtype = int) for i, ir in enumerate(data_file): data[i] = np.asarray(ir[: -1], dtype = np.float64) target[i] = np.asarray(ir[-1], dtype = int) if descr_file_name is None: return (data, target, target_names) else: assert descr_module is not None descr = load_descr(descr_module = descr_module, descr_file_name = descr_file_name) return (data, target, target_names, descr)", - "docstring": "Loads from importlib.resourcesdata_module/data_file_name'wine_data.csv''sklearn.datasets.data'descr_module/descr_file_name'wine_data.rst'load_descrdescr_file_nameload_descr'sklearn.datasets.descr'datadescr_file_namedescr_file_name` is not None. encoding : str, optional Text encoding of the CSV file. .. versionadded:: 1.4", - "type": "function", - "file_path": "scikit-learn\\sklearn\\datasets\\_base.py", - "ast_data": "FunctionDef name:load_csv_data arguments arg:data_file_name Assign With Assign Call call:reader Assign Call call:next Assign Call call:int Assign Call call:int Assign Call call:array Assign Call call:empty Assign Call call:empty For Call call:enumerate Assign Call call:asarray Assign Call call:asarray If Compare op:Is Return return:yes Assign Call call:load_descr Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_compression_type_string", - "source_code": "@classmethod def get_compression_type_string(cls, options): if not options: return '' elif isinstance(options, TFRecordOptions): return cls.get_compression_type_string(options.compression_type) elif isinstance(options, TFRecordCompressionType): return cls.compression_type_map[options] elif options in TFRecordOptions.compression_type_map: return cls.compression_type_map[options] elif options in TFRecordOptions.compression_type_map.values(): return options else: raise ValueError('Not a valid compression_type: \"{}\"'.format(options))", - "docstring": "Convert various option types to a unified string. Args: options: , , or string. Returns: Compression type as string (e.g. , , or ). 
Raises: ValueError: If compression_type is invalid.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\tf_record.py", - "ast_data": "FunctionDef name:get_compression_type_string arguments arg:cls arg:options If Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Compare op:In Return return:yes If Compare op:In Return return:yes Raise raises:ValueError('Not a valid compression_type: \"{}\"'.format(options))" - }, - { - "library": "kornia", - "name": "adjust_contrast_with_mean_subtraction", - "source_code": "def adjust_contrast_with_mean_subtraction(image: Tensor, factor: Union[float, Tensor]) -> Tensor: KORNIA_CHECK_IS_TENSOR(image, 'Expected shape (*, H, W)') KORNIA_CHECK(isinstance(factor, (float, Tensor)), 'Factor should be float or Tensor.') if isinstance(factor, float): factor = torch.as_tensor(factor, device = image.device, dtype = image.dtype) elif isinstance(factor, Tensor): factor = factor.to(image.device, image.dtype) while len(factor.shape) ! = len(image.shape): factor = factor[..., None] if image.shape[-3] = = 3: img_mean = rgb_to_grayscale(image).mean((-2, -1), True) else: img_mean = image.mean() img_adjust: Tensor = image * factor + img_mean * (1 - factor) img_adjust = img_adjust.clamp(min = 0.0, max = 1.0) return img_adjust", - "docstring": "Adjust the contrast of an image tensor by subtracting the mean over channels. .. note:: this is just a convenience function to have compatibility with Pil. For exact definition of image contrast adjustment consider using :func:. Args: image: Image to be adjusted in the shape of :math:. factor: Contrast adjust factor per element in the batch. 0 generates a completely black image, 1 does not modify the input image while any other non-negative number modify the brightness by this factor. Return: Adjusted image in the shape of :math:. Example: >>> import torch >>> x = torch.ones(1, 1, 2, 2) >>> adjust_contrast_with_mean_subtraction(x, 0.5) tensor([[[[1., 1.], [1., 1.]]]]) >>> x = torch.ones(2, 5, 3, 3) >>> y = torch.tensor([0.65, 0.50]) >>> adjust_contrast_with_mean_subtraction(x, y).shape torch.Size([2, 5, 3, 3])", - "type": "function", - "file_path": "kornia\\kornia\\enhance\\adjust.py", - "ast_data": "FunctionDef name:adjust_contrast_with_mean_subtraction arguments arg:image type:Tensor arg:factor type:Union[float, Tensor] If Call call:isinstance Assign Call call:as_tensor If Call call:isinstance Assign Call call:to While Compare op:NotEq Assign If Compare op:Eq Assign Call call:mean Assign Call call:mean Assign Call call:clamp Return return:yes" - }, - { - "library": "mongo", - "name": "decode_file_iter", - "source_code": "def decode_file_iter(file_obj: Union[BinaryIO, IO[bytes]], codec_options: Optional[CodecOptions[_DocumentType]] = None) -> Union[Iterator[dict[str, Any]], Iterator[_DocumentType]]: opts = codec_options or DEFAULT_CODEC_OPTIONS while True: size_data: Any = file_obj.read(4) if not size_data: break elif len(size_data) ! = 4: raise InvalidBSON('cut off in middle of objsize') obj_size = _UNPACK_INT_FROM(size_data, 0)[0] - 4 elements = size_data + file_obj.read(max(0, obj_size)) yield _bson_to_dict(elements, opts)", - "docstring": "Decode bson data from a file to multiple documents as a generator. Works similarly to the decode_all function, but reads from the file object in chunks and parses bson in chunks, yielding one document at a time. :param file_obj: A file object containing BSON data. :param codec_options: An instance of :class:. .. 
versionchanged:: 3.0 Replaced , , and options with . .. versionadded:: 2.8", - "type": "function", - "file_path": "mongo\\bson\\__init__.py", - "ast_data": "FunctionDef name:decode_file_iter arguments arg:file_obj type:Union[BinaryIO, IO[bytes]] arg:codec_options type:Optional[CodecOptions[_DocumentType]] Assign BoolOp While If If Compare op:NotEq Raise raises:InvalidBSON('cut off in middle of objsize') Assign Assign" - }, - { - "library": "pytorch", - "name": "patch", - "source_code": "def patch(self, frame_dict: dict[str, Any], name: str, new_fn: Callable, deduplicate: bool = True): new_fn.__fx_already_patched = deduplicate if name not in frame_dict and hasattr(builtins, name): self.patches_made.append(_PatchedFnDel(frame_dict, name, None, new_fn)) self.patches_made[-1].patch() elif getattr(frame_dict[name], '__fx_already_patched', False): return else: self.patches_made.append(_PatchedFnSetItem(frame_dict, name, frame_dict[name], new_fn)) self.patches_made[-1].patch()", - "docstring": "Replace frame_dict[name] with new_fn until we exit the context manager.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", - "ast_data": "FunctionDef name:patch arguments arg:self arg:frame_dict type:dict[str, Any] arg:name type:str arg:new_fn type:Callable arg:deduplicate type:bool Assign If BoolOp Compare op:NotIn Call call:hasattr If Call call:getattr Return return:no" - }, - { - "library": "pytorch", - "name": "adaptive_max_pool3d_with_indices", - "source_code": "def adaptive_max_pool3d_with_indices(input: Tensor, output_size: BroadcastingList3[int], return_indices: bool = False) -> tuple[Tensor, Tensor]: if has_torch_function_unary(input): return handle_torch_function(adaptive_max_pool3d_with_indices, (input,), input, output_size, return_indices = return_indices) output_size = _list_with_default(output_size, input.size()) return torch._C._nn.adaptive_max_pool3d(input, output_size)", - "docstring": "adaptive_max_pool3d(input, output_size, return_indices=False) Applies a 3D adaptive max pooling over an input signal composed of several input planes. See :class: for details and output shape. Args: output_size: the target output size (single integer or triple-integer tuple) return_indices: whether to return pooling indices. Default: ``", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:adaptive_max_pool3d_with_indices arguments arg:input type:Tensor arg:output_size type:BroadcastingList3[int] arg:return_indices type:bool If Call call:has_torch_function_unary Return return:yes Assign Call call:_list_with_default Return return:yes" - }, - { - "library": "tensorflow", - "name": "number_of_shards", - "source_code": "@property def number_of_shards(self): return self._sharding_policies[0].number_of_shards", - "docstring": "Gets the number of shards to use for the InfeedQueue. 
Returns: Number of shards or None if the number of shards has not been set.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", - "ast_data": "FunctionDef name:number_of_shards arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "sparse_tensor_to_dense", - "source_code": "@tf_export('sparse.to_dense', v1 = ['sparse.to_dense', 'sparse_tensor_to_dense']) @deprecation.deprecated_endpoints('sparse_tensor_to_dense') def sparse_tensor_to_dense(sp_input, default_value = None, validate_indices = True, name = None): sp_input = _convert_to_sparse_tensor(sp_input) if default_value is None: default_value = array_ops.zeros([], dtype = sp_input.dtype) return gen_sparse_ops.sparse_to_dense(sp_input.indices, sp_input.dense_shape, sp_input.values, default_value = default_value, validate_indices = validate_indices, name = name)", - "docstring": "Converts a into a dense tensor. For this sparse tensor with three non-empty values: >>> sp_input = tf.sparse.SparseTensor( ... dense_shape=[3, 5], ... values=[7, 8, 9], ... indices =[[0, 1], ... [0, 3], ... [2, 0]]) The output will be a dense tensor with values: >>> tf.sparse.to_dense(sp_input).numpy() array([[0, 7, 0, 8, 0], [0, 0, 0, 0, 0], [9, 0, 0, 0, 0]], dtype=int32) Note: Indices must be without repeats. This is only tested if is . Args: sp_input: The input . default_value: Scalar value to set for indices not specified in . Defaults to zero. validate_indices: A boolean value. If , indices are checked to make sure they are sorted in lexicographic order and that there are no repeats. name: A name prefix for the returned tensors (optional). Returns: A dense tensor with shape and values specified by the non-empty values in . Indices not in are assigned . Raises: TypeError: If is not a .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", - "ast_data": "FunctionDef name:sparse_tensor_to_dense arguments arg:sp_input arg:default_value arg:validate_indices arg:name Call call:tf_export Call call:deprecated_endpoints Assign Call call:_convert_to_sparse_tensor If Compare op:Is Assign Call call:zeros Return return:yes" - }, - { - "library": "pandas", - "name": "codes", - "source_code": "@property def codes(self) -> FrozenList: return self._codes", - "docstring": "Codes of the MultiIndex. Codes are the position of the index value in the list of level values for each level. Returns ------- tuple of numpy.ndarray The codes of the MultiIndex. Each array in the tuple corresponds to a level in the MultiIndex. See Also -------- MultiIndex.set_codes : Set new codes on MultiIndex. Examples -------- >>> arrays = [[1, 1, 2, 2], [\"red\", \"blue\", \"red\", \"blue\"]] >>> mi = pd.MultiIndex.from_arrays(arrays, names=(\"number\", \"color\")) >>> mi.codes FrozenList([[0, 0, 1, 1], [1, 0, 1, 0]])", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\multi.py", - "ast_data": "FunctionDef name:codes arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "impulse", - "source_code": "def impulse(self, X0 = None, T = None, N = None): return impulse(self, X0 = X0, T = T, N = N)", - "docstring": "Return the impulse response of a continuous-time system. 
See for details.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:impulse arguments arg:self arg:X0 arg:T arg:N Return return:yes" - }, - { - "library": "tensorflow", - "name": "read_model", - "source_code": "def read_model(input_tflite_file): if not gfile.Exists(input_tflite_file): raise RuntimeError('Input file not found at %r\\n' % input_tflite_file) with gfile.GFile(input_tflite_file, 'rb') as input_file_handle: model_bytearray = bytearray(input_file_handle.read()) return read_model_from_bytearray(model_bytearray)", - "docstring": "Reads a tflite model as a python object. Args: input_tflite_file: Full path name to the input tflite file Raises: RuntimeError: If input_tflite_file path is invalid. IOError: If input_tflite_file cannot be opened. Returns: A python object corresponding to the input tflite file.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py", - "ast_data": "FunctionDef name:read_model arguments arg:input_tflite_file If Raise raises:RuntimeError('Input file not found at %r\\n' % input_tflite_file) With Assign Call call:bytearray Return return:yes" - }, - { - "library": "cherrypy", - "name": "do_login", - "source_code": "def do_login(self, username, password, from_page = '..', **kwargs): response = cherrypy.serving.response error_msg = self.check_username_and_password(username, password) if error_msg: body = self.login_screen(from_page, username, error_msg) response.body = body if 'Content-Length' in response.headers: del response.headers['Content-Length'] return True else: cherrypy.serving.request.login = username cherrypy.session[self.session_key] = username self.on_login(username) raise cherrypy.HTTPRedirect(from_page or '/')", - "docstring": "Login. May raise redirect, or return True if request handled.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", - "ast_data": "FunctionDef name:do_login arguments arg:self arg:username arg:password arg:from_page kwarg:kwargs Assign Assign Call call:check_username_and_password If Assign Call call:login_screen Assign If Compare op:In Return return:yes Assign Assign Raise raises:cherrypy.HTTPRedirect(from_page or '/')" - }, - { - "library": "matplotlib", - "name": "get_celld", - "source_code": "def get_celld(self): return self._cells", - "docstring": "Return a dict of cells in the table mapping *(row, column)* to \\s. Notes ----- You can also directly index into the Table object to access individual cells:: cell = table[row, col]", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\table.py", - "ast_data": "FunctionDef name:get_celld arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "protocol", - "source_code": "@property def protocol(self) -> int: return self._protocol", - "docstring": "The protocol version chosen when constructing the context. 
This attribute is read-only.", - "type": "method", - "file_path": "mongo\\pymongo\\pyopenssl_context.py", - "ast_data": "FunctionDef name:protocol arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "union", - "source_code": "@staticmethod def union(bboxes): if not len(bboxes): raise ValueError(\"'bboxes' cannot be empty\") x0 = np.min([bbox.xmin for bbox in bboxes]) x1 = np.max([bbox.xmax for bbox in bboxes]) y0 = np.min([bbox.ymin for bbox in bboxes]) y1 = np.max([bbox.ymax for bbox in bboxes]) return Bbox([[x0, y0], [x1, y1]])", - "docstring": "Return a that contains all of the given *bboxes*.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:union arguments arg:bboxes If Raise raises:ValueError(\"'bboxes' cannot be empty\") Assign Call call:min Assign Call call:max Assign Call call:min Assign Call call:max Return return:yes" - }, - { - "library": "numpy", - "name": "lagweight", - "source_code": "def lagweight(x): w = np.exp(-x) return w", - "docstring": "Weight function of the Laguerre polynomials. The weight function is :math: and the interval of integration is :math:. The Laguerre polynomials are orthogonal, but not normalized, with respect to this weight function. Parameters ---------- x : array_like Values at which the weight function will be computed. Returns ------- w : ndarray The weight function at . Examples -------- >>> from numpy.polynomial.laguerre import lagweight >>> x = np.array([0, 1, 2]) >>> lagweight(x) array([1. , 0.36787944, 0.13533528])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\laguerre.py", - "ast_data": "FunctionDef name:lagweight arguments arg:x Assign Call call:exp Return return:yes" - }, - { - "library": "tensorflow", - "name": "validate_inputs", - "source_code": "def validate_inputs(x, y): if isinstance(x, iterator_ops.Iterator) or isinstance(y, iterator_ops.Iterator): raise ValueError('`DistributionStrategy` does not support inputs of type Iterator. You must pass a `tf.data.Dataset` object or a numpy array as input.')", - "docstring": "Validate inputs when using DistributionStrategy. Args: x: Model Inputs. y: Model Targets. Raises: ValueError: if input is not a Dataset or a numpy array(when we use MirroredStrategy).", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_training_utils_v1.py", - "ast_data": "FunctionDef name:validate_inputs arguments arg:x arg:y If BoolOp Call call:isinstance Call call:isinstance Raise raises:ValueError('`DistributionStrategy` does not support inputs of type Iterator. You must pass a `tf.data.Dataset` object or a numpy array as input.')" - }, - { - "library": "kornia", - "name": "from_name", - "source_code": "@staticmethod def from_name(name: str) -> Sam: if name in ['vit_b', 'vit_l', 'vit_h', 'mobile_sam']: return Sam.from_config(SamConfig(name)) else: raise ValueError(f'Invalid SAM model name: {name}')", - "docstring": "Build/load the SAM model based on it's name. Args: name: The name of the SAM model. 
Valid names are: - 'vit_b' - 'vit_l' - 'vit_h' - 'mobile_sam' Returns: The respective SAM model", - "type": "method", - "file_path": "kornia\\kornia\\contrib\\models\\sam\\model.py", - "ast_data": "FunctionDef name:from_name arguments arg:name type:str If Compare op:In Return return:yes Raise raises:ValueError(f'Invalid SAM model name: {name}')" - }, - { - "library": "scipy", - "name": "iterate_delaunay", - "source_code": "def iterate_delaunay(self): self.nc + = self.n self.sampled_surface(infty_cons_sampl = self.infty_cons_sampl) if self.disp: logging.info(f'self.n = {self.n}') logging.info(f'self.nc = {self.nc}') logging.info('Constructing and refining simplicial complex graph structure from sampling points.') if self.dim < 2: self.Ind_sorted = np.argsort(self.C, axis = 0) self.Ind_sorted = self.Ind_sorted.flatten() tris = [] for ind, ind_s in enumerate(self.Ind_sorted): if ind > 0: tris.append(self.Ind_sorted[ind - 1: ind + 1]) tris = np.array(tris) self.Tri = namedtuple('Tri', ['points', 'simplices'])(self.C, tris) self.points = {} else: if self.C.shape[0] > self.dim + 1: self.delaunay_triangulation(n_prc = self.n_prc) self.n_prc = self.C.shape[0] if self.disp: logging.info('Triangulation completed, evaluating all constraints and objective function values.') if hasattr(self, 'Tri'): self.HC.vf_to_vv(self.Tri.points, self.Tri.simplices) if self.disp: logging.info('Triangulation completed, evaluating all constraints and objective function values.') self.HC.V.process_pools() if self.disp: logging.info('Evaluations completed.') self.fn = self.HC.V.nfev self.n_sampled = self.nc return", - "docstring": "Build a complex of Delaunay triangulated points Note: called with `` after class initiation", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_shgo.py", - "ast_data": "FunctionDef name:iterate_delaunay arguments arg:self If If Compare op:Lt Assign Call call:argsort Assign Call call:flatten Assign For Call call:enumerate If Compare op:Gt Assign Call call:array Assign Call Assign If Compare op:Gt Assign If If Call call:hasattr If If Assign Assign Return return:no" - }, - { - "library": "pandas", - "name": "infer_freq", - "source_code": "def infer_freq(index: DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin) -> str | None: from pandas.core.api import DatetimeIndex if isinstance(index, ABCSeries): values = index._values if not (lib.is_np_dtype(values.dtype, 'mM') or isinstance(values.dtype, DatetimeTZDtype) or values.dtype = = object): raise TypeError(f'cannot infer freq from a non-convertible dtype on a Series of {index.dtype}') index = values inferer: _FrequencyInferer if not hasattr(index, 'dtype'): pass elif isinstance(index.dtype, PeriodDtype): raise TypeError('PeriodIndex given. Check the `freq` attribute instead of using infer_freq.') elif lib.is_np_dtype(index.dtype, 'm'): inferer = _TimedeltaFrequencyInferer(index) return inferer.get_freq() elif is_numeric_dtype(index.dtype): raise TypeError(f'cannot infer freq from a non-convertible index of dtype {index.dtype}') if not isinstance(index, DatetimeIndex): index = DatetimeIndex(index) inferer = _FrequencyInferer(index) return inferer.get_freq()", - "docstring": "Infer the most likely frequency given the input index. This method attempts to deduce the most probable frequency (e.g., 'D' for daily, 'H' for hourly) from a sequence of datetime-like objects. It is particularly useful when the frequency of a time series is not explicitly set or known but can be inferred from its values. 
Parameters ---------- index : DatetimeIndex, TimedeltaIndex, Series or array-like If passed a Series will use the values of the series (NOT THE INDEX). Returns ------- str or None None if no discernible frequency. Raises ------ TypeError If the index is not datetime-like. ValueError If there are fewer than three values. See Also -------- date_range : Return a fixed frequency DatetimeIndex. timedelta_range : Return a fixed frequency TimedeltaIndex with day as the default. period_range : Return a fixed frequency PeriodIndex. DatetimeIndex.freq : Return the frequency object if it is set, otherwise None. Examples -------- >>> idx = pd.date_range(start=\"2020/12/01\", end=\"2020/12/30\", periods=30) >>> pd.infer_freq(idx) 'D'", - "type": "function", - "file_path": "pandas\\pandas\\tseries\\frequencies.py", - "ast_data": "FunctionDef name:infer_freq arguments arg:index type:DatetimeIndex | TimedeltaIndex | Series | DatetimeLikeArrayMixin If Call call:isinstance Assign If Raise raises:TypeError(f'cannot infer freq from a non-convertible dtype on a Series of {index.dtype}') Assign If If Call call:isinstance Raise raises:TypeError('PeriodIndex given. Check the `freq` attribute instead of using infer_freq.') If Call call:is_np_dtype Assign Call call:_TimedeltaFrequencyInferer Return return:yes If Call call:is_numeric_dtype Raise raises:TypeError(f'cannot infer freq from a non-convertible index of dtype {index.dtype}') If Assign Call call:DatetimeIndex Assign Call call:_FrequencyInferer Return return:yes" - }, - { - "library": "kornia", - "name": "quaternion_to_axis_angle", - "source_code": "def quaternion_to_axis_angle(quaternion: Tensor) -> Tensor: if not torch.is_tensor(quaternion): raise TypeError(f'Input type is not a Tensor. Got {type(quaternion)}') if not quaternion.shape[-1] = = 4: raise ValueError(f'Input must be a tensor of shape Nx4 or 4. Got {quaternion.shape}') q1: Tensor = tensor([]) q2: Tensor = tensor([]) q3: Tensor = tensor([]) cos_theta: Tensor = tensor([]) cos_theta = quaternion[..., 0] q1 = quaternion[..., 1] q2 = quaternion[..., 2] q3 = quaternion[..., 3] sin_squared_theta: Tensor = q1 * q1 + q2 * q2 + q3 * q3 sin_theta: Tensor = torch.sqrt(sin_squared_theta) two_theta: Tensor = 2.0 * where(cos_theta < 0.0, torch.atan2(-sin_theta, -cos_theta), torch.atan2(sin_theta, cos_theta)) k_pos: Tensor = two_theta / sin_theta k_neg: Tensor = 2.0 * torch.ones_like(sin_theta) k: Tensor = where(sin_squared_theta > 0.0, k_pos, k_neg) axis_angle: Tensor = torch.zeros_like(quaternion)[..., : 3] axis_angle[..., 0] + = q1 * k axis_angle[..., 1] + = q2 * k axis_angle[..., 2] + = q3 * k return axis_angle", - "docstring": "Convert quaternion vector to axis angle of rotation in radians. The quaternion should be in (w, x, y, z) format. Adapted from ceres C++ library: ceres-solver/include/ceres/rotation.h Args: quaternion: tensor with quaternions. Return: tensor with axis angle of rotation. Shape: - Input: :math: where means, any number of dimensions - Output: :math: Example: >>> quaternion = tensor((1., 0., 0., 0.)) >>> quaternion_to_axis_angle(quaternion) tensor([0., 0., 0.])", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\conversions.py", - "ast_data": "FunctionDef name:quaternion_to_axis_angle arguments arg:quaternion type:Tensor If Raise raises:TypeError(f'Input type is not a Tensor. Got {type(quaternion)}') If Raise raises:ValueError(f'Input must be a tensor of shape Nx4 or 4. 
Got {quaternion.shape}') Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_default_mesh", - "source_code": "@tf_export('experimental.dtensor.get_default_mesh', v1 = []) def get_default_mesh() -> Optional[layout_lib.Mesh]: if _dtensor_singleton is None: return None else: return _dtensor_singleton._current_default_mesh", - "docstring": "Return the default mesh under the current dtensor device context. In the case that dtensor device system is not initialized, this function will return None. Returns: The current default mesh for the dtensor device context.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\api.py", - "ast_data": "FunctionDef name:get_default_mesh arguments Call call:tf_export If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "experimental_as_proto", - "source_code": "def experimental_as_proto(self) -> types_pb2.SerializedDType: return types_pb2.SerializedDType(datatype = self._type_enum)", - "docstring": "Returns a proto representation of the Dtype instance.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", - "ast_data": "FunctionDef name:experimental_as_proto arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "lagline", - "source_code": "def lagline(off, scl): if scl ! = 0: return np.array([off + scl, -scl]) else: return np.array([off])", - "docstring": "Laguerre series whose graph is a straight line. Parameters ---------- off, scl : scalars The specified line is given by ``. See Also -------- numpy.polynomial.polynomial.polyline numpy.polynomial.chebyshev.chebline numpy.polynomial.legendre.legline numpy.polynomial.hermite.hermline numpy.polynomial.hermite_e.hermeline Examples -------- >>> from numpy.polynomial.laguerre import lagline, lagval >>> lagval(0,lagline(3, 2)) 3.0 >>> lagval(1,lagline(3, 2)) 5.0", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\laguerre.py", - "ast_data": "FunctionDef name:lagline arguments arg:off arg:scl If Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "inplace_update_buffers", - "source_code": "@property @cache_on_self def inplace_update_buffers(self): for k in self.kernels[1:]: assert k.inplace_update_buffers = = self.kernels[0].inplace_update_buffers return self.kernels[0].inplace_update_buffers", - "docstring": "Make sure all kernels have the same inplace update mappings.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\multi_kernel.py", - "ast_data": "FunctionDef name:inplace_update_buffers arguments arg:self For Return return:yes" - }, - { - "library": "tensorflow", - "name": "placeholder_value", - "source_code": "@doc_controls.do_not_doc_inheritable def placeholder_value(self, placeholder_context): if placeholder_context.unnest_only: return self component_placeholders = nest.map_structure(lambda x: x.placeholder_value(placeholder_context), self._component_specs) return self._from_components(component_placeholders)", - "docstring": "Value used for tracing a function signature with this TraceType. WARNING: Do not override. Args: placeholder_context: A class container for context information when creating a placeholder value. 
Returns: A placeholder whose components are recursively composed of placeholders themselves.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", - "ast_data": "FunctionDef name:placeholder_value arguments arg:self arg:placeholder_context If Return return:yes Assign Call call:map_structure Return return:yes" - }, - { - "library": "tensorflow", - "name": "update_state", - "source_code": "def update_state(self, values, sample_weight = None): values = math_ops.cast(values, self._dtype) if not self._built: self._build(values.shape) elif values.shape ! = self._shape: raise ValueError('MeanTensor input values must always have the same shape. Expected shape (set during the first call): {}. Got: {}'.format(self._shape, values.shape)) num_values = array_ops.ones_like(values) if sample_weight is not None: sample_weight = math_ops.cast(sample_weight, self._dtype) values, _, sample_weight = losses_utils.squeeze_or_expand_dimensions(values, sample_weight = sample_weight) try: sample_weight = weights_broadcast_ops.broadcast_weights(sample_weight, values) except ValueError: ndim = backend.ndim(values) weight_ndim = backend.ndim(sample_weight) values = math_ops.reduce_mean(values, axis = list(range(weight_ndim, ndim))) num_values = math_ops.multiply(num_values, sample_weight) values = math_ops.multiply(values, sample_weight) update_total_op = self._total.assign_add(values) with ops.control_dependencies([update_total_op]): return self._count.assign_add(num_values)", - "docstring": "Accumulates statistics for computing the element-wise mean. Args: values: Per-example value. sample_weight: Optional weighting of each example. Defaults to 1. Returns: Update op.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", - "ast_data": "FunctionDef name:update_state arguments arg:self arg:values arg:sample_weight Assign Call call:cast If If Compare op:NotEq Raise raises:ValueError('MeanTensor input values must always have the same shape. Expected shape (set during the first call): {}. 
Got: {}'.format(self._shape, values.shape)) Assign Call call:ones_like If Compare op:IsNot Assign Call call:cast Assign Call call:squeeze_or_expand_dimensions Try Assign Call call:broadcast_weights ExceptHandler Assign Call call:ndim Assign Call call:ndim Assign Call call:reduce_mean Assign Call call:multiply Assign Call call:multiply Assign Call call:assign_add With Return return:yes" - }, - { - "library": "pytorch", - "name": "write_main", - "source_code": "def write_main(self, install_root, oss, symbol_name): with open(os.path.join(install_root, 'main.c'), 'w') as outfp: outfp.write(MAIN_INCLUDES) for m in self.frozen_modules: outfp.write(f'extern unsigned char {m.c_name}[];\\n') outfp.write(MAIN_PREFIX_TEMPLATE.format(symbol_name)) for m in self.frozen_modules: outfp.write(f'\\t{{\"{m.module_name}\", {m.c_name}, {m.size}}}, \\n') outfp.write(MAIN_SUFFIX) if oss: outfp.write(FAKE_PREFIX) outfp.write(MAIN_SUFFIX)", - "docstring": "Write the file containing a table enumerating all the frozen modules.", - "type": "method", - "file_path": "pytorch\\torch\\utils\\_freeze.py", - "ast_data": "FunctionDef name:write_main arguments arg:self arg:install_root arg:oss arg:symbol_name With For For If" - }, - { - "library": "pytorch", - "name": "is_buffer", - "source_code": "def is_buffer(program: 'ExportedProgram', node: torch.fx.Node) -> bool: return node.name in program.graph_signature.inputs_to_buffers", - "docstring": "Checks if the given node is a buffer within the exported program", - "type": "function", - "file_path": "pytorch\\torch\\_export\\utils.py", - "ast_data": "FunctionDef name:is_buffer arguments arg:program type:'ExportedProgram' arg:node type:torch.fx.Node Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, path: Union[str, os.PathLike], single_file_per_rank: bool = True, sync_files: bool = True, thread_count: int = 1, per_thread_copy_ahead: int = 10000000, overwrite: bool = True, _extensions: Optional[Sequence[StreamTransformExtension]] = None, serialization_format: SerializationFormat = SerializationFormat.TORCH_SAVE, **kwargs) -> None: super().__init__(path, single_file_per_rank, sync_files, thread_count, per_thread_copy_ahead, overwrite = overwrite, _extensions = _extensions, serialization_format = serialization_format) self.fs = FileSystem() self.path = self.fs.init_path(path, **kwargs)", - "docstring": "Initialize the writer pointing to . Args: path: directory where the checkpoint will be written to. single_file_per_rank: Produce one file per rank instead of one file per tensor/blob. Default to True. sync_files : force files to be synced to permanent storage. Default to True. thread_count: Number of IO threads to use to write. Default to 1. per_thread_copy_ahead: How many bytes to copy from the GPU ahead of saving then. Default 10Mb. overwrite: Whether to allow overwriting existing checkpoints. Defaults to True. _extensions: Extensions to apply to output streams (EXPERIMENTAL) N. B. 
If sync_files is disabled, there's no guarantee that the checkpoint will be consistent in the case of a failure.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\_fsspec_filesystem.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:path type:Union[str, os.PathLike] arg:single_file_per_rank type:bool arg:sync_files type:bool arg:thread_count type:int arg:per_thread_copy_ahead type:int arg:overwrite type:bool arg:_extensions type:Optional[Sequence[StreamTransformExtension]] arg:serialization_format type:SerializationFormat kwarg:kwargs Assign Call call:FileSystem Assign Call call:init_path" - }, - { - "library": "kornia", - "name": "unsharp_mask", - "source_code": "def unsharp_mask(input: Tensor, kernel_size: tuple[int, int] | int, sigma: tuple[float, float] | Tensor, border_type: str = 'reflect') -> Tensor: data_blur: Tensor = gaussian_blur2d(input, kernel_size, sigma, border_type) data_sharpened: Tensor = input + (input - data_blur) return data_sharpened", - "docstring": "Create an operator that sharpens a tensor by applying operation out = 2 * image - gaussian_blur2d(image). .. image:: _static/img/unsharp_mask.png Args: input: the input tensor with shape :math:. kernel_size: the size of the kernel. sigma: the standard deviation of the kernel. border_type: the padding mode to be applied before convolving. The expected modes are: `(B,C,H,W)`. Examples: >>> input = torch.rand(2, 4, 5, 5) >>> output = unsharp_mask(input, (3, 3), (1.5, 1.5)) >>> output.shape torch.Size([2, 4, 5, 5])", - "type": "function", - "file_path": "kornia\\kornia\\filters\\unsharp.py", - "ast_data": "FunctionDef name:unsharp_mask arguments arg:input type:Tensor arg:kernel_size type:tuple[int, int] | int arg:sigma type:tuple[float, float] | Tensor arg:border_type type:str Return return:yes" - }, - { - "library": "pandas", - "name": "sem", - "source_code": "@final def sem(self, ddof: int = 1, numeric_only: bool = False): return self._downsample('sem', ddof = ddof, numeric_only = numeric_only)", - "docstring": "Compute standard error of the mean of groups, excluding missing values. For multiple groupings, the result index will be a MultiIndex. Parameters ---------- ddof : int, default 1 Degrees of freedom. numeric_only : bool, default False Include only , or data. .. versionadded:: 1.5.0 .. versionchanged:: 2.0.0 numeric_only now defaults to ``. Returns ------- Series or DataFrame Standard error of the mean of values within each group. See Also -------- DataFrame.sem : Return unbiased standard error of the mean over requested axis. Series.sem : Return unbiased standard error of the mean over requested axis. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 8], ... index=pd.DatetimeIndex( ... [ ... \"2023-01-01\", ... \"2023-01-10\", ... \"2023-01-15\", ... \"2023-02-01\", ... \"2023-02-10\", ... \"2023-02-15\", ... ] ... ), ... ) >>> ser.resample(\"MS\").sem() 2023-01-01 0.577350 2023-02-01 1.527525 Freq: MS, dtype: float64", - "type": "method", - "file_path": "pandas\\pandas\\core\\resample.py", - "ast_data": "FunctionDef name:sem arguments arg:self arg:ddof type:int arg:numeric_only type:bool Return return:yes" - }, - { - "library": "mongo", - "name": "bson_encode", - "source_code": "def bson_encode(self, doc: MutableMapping[str, Any]) -> bytes: return encode(doc)", - "docstring": "Encode a document to BSON. A document can be any mapping type (like :class:). 
:param doc: mapping type representing a document :return: The encoded BSON bytes.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\encryption.py", - "ast_data": "FunctionDef name:bson_encode arguments arg:self arg:doc type:MutableMapping[str, Any] Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_xy", - "source_code": "def set_xy(self, xy): xy = np.asarray(xy) nverts, _ = xy.shape if self._closed: if nverts = = 1 or (nverts > 1 and (xy[0] ! = xy[-1]).any()): xy = np.concatenate([xy, [xy[0]]]) elif nverts > 2 and (xy[0] = = xy[-1]).all(): xy = xy[: -1] self._path = Path(xy, closed = self._closed) self.stale = True", - "docstring": "Set the vertices of the polygon. Parameters ---------- xy : (N, 2) array-like The coordinates of the vertices. Notes ----- Unlike , we do not ignore the last input vertex. If the polygon is meant to be closed, and the last point of the polygon is not equal to the first, we assume that the user has not explicitly passed a `` vertex, and add it ourselves.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_xy arguments arg:self arg:xy Assign Call call:asarray Assign If If BoolOp Compare op:Eq BoolOp Compare op:Gt Call call:any Assign Call call:concatenate If BoolOp Compare op:Gt Call call:all Assign Assign Call call:Path Assign" - }, - { - "library": "pytorch", - "name": "record_event", - "source_code": "def record_event(self, event = None): if event is None: event = Event() event.record(self) return event", - "docstring": "Record an event. Args: event (torch.cuda.Event, optional): event to record. If not given, a new one will be allocated. Returns: Recorded event.", - "type": "method", - "file_path": "pytorch\\torch\\cuda\\streams.py", - "ast_data": "FunctionDef name:record_event arguments arg:self arg:event If Compare op:Is Assign Call call:Event Return return:yes" - }, - { - "library": "scipy", - "name": "Jacobian", - "source_code": "class Jacobian: def __init__(self, **kw): names = ['solve', 'update', 'matvec', 'rmatvec', 'rsolve', 'matmat', 'todense', 'shape', 'dtype'] for name, value in kw.items(): if name not in names: raise ValueError(f'Unknown keyword argument {name}') if value is not None: setattr(self, name, kw[name]) if hasattr(self, 'todense'): def __array__(self, dtype = None, copy = None): if dtype is not None: raise ValueError(f'`dtype` must be None, was {dtype}') return self.todense() def aspreconditioner(self): return InverseJacobian(self) def solve(self, v, tol = 0): raise NotImplementedError def update(self, x, F): pass def setup(self, x, F, func): self.func = func self.shape = (F.size, x.size) self.dtype = F.dtype if self.__class__.setup is Jacobian.setup: self.update(x, F)", - "docstring": "Common interface for Jacobians or Jacobian approximations. The optional methods come useful when implementing trust region etc., algorithms that often require evaluating transposes of the Jacobian. Methods ------- solve Returns J^-1 * v update Updates Jacobian to point (where the function has residual ) matvec : optional Returns J * v rmatvec : optional Returns A^H * v rsolve : optional Returns A^-H * v matmat : optional Returns A * V, where V is a dense matrix with dimensions (N,K). todense : optional Form the dense Jacobian matrix. Necessary for dense trust region algorithms, and useful for testing. Attributes ---------- shape Matrix dimensions (M, N) dtype Data type of the matrix. 
func : callable, optional Function the Jacobian corresponds to", - "type": "class", - "file_path": "scipy\\scipy\\optimize\\_nonlin.py", - "ast_data": "ClassDef name:Jacobian FunctionDef name:__init__ arguments arg:self kwarg:kw Assign For Call call:items If Compare op:NotIn Raise raises:ValueError(f'Unknown keyword argument {name}') If Compare op:IsNot If Call call:hasattr FunctionDef name:__array__ arguments arg:self arg:dtype arg:copy If Compare op:IsNot Raise raises:ValueError(f'`dtype` must be None, was {dtype}') Return return:yes FunctionDef name:aspreconditioner arguments arg:self Return return:yes FunctionDef name:solve arguments arg:self arg:v arg:tol Raise raises:NotImplementedError FunctionDef name:update arguments arg:self arg:x arg:F FunctionDef name:setup arguments arg:self arg:x arg:F arg:func Assign Assign Assign If Compare op:Is" - }, - { - "library": "kornia", - "name": "laplacian_1d", - "source_code": "def laplacian_1d(window_size: int, *, device: Optional[Device] = None, dtype: Dtype = torch.float32) -> Tensor: filter_1d = torch.ones(window_size, device = device, dtype = dtype) middle = window_size // 2 filter_1d[middle] = 1 - window_size return filter_1d", - "docstring": "One could also use the Laplacian of Gaussian formula to design the filter.", - "type": "function", - "file_path": "kornia\\kornia\\filters\\kernels.py", - "ast_data": "FunctionDef name:laplacian_1d arguments arg:window_size type:int Assign Call call:ones Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "add_update_values", - "source_code": "def add_update_values(self, values): values_seq = [] for name, val in values.items(): field = self.get_meta().get_field(name) direct = not (field.auto_created and (not field.concrete)) or not field.concrete model = field.model._meta.concrete_model if field.name = = 'pk' and model._meta.is_composite_pk: raise FieldError('Composite primary key fields must be updated individually.') if not direct or (field.is_relation and field.many_to_many): raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field) if model is not self.get_meta().concrete_model: self.add_related_update(model, field, val) continue values_seq.append((field, model, val)) return self.add_update_fields(values_seq)", - "docstring": "Convert a dictionary of field name to value mappings into an update query. This is the entry point for the public update() method on querysets.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\subqueries.py", - "ast_data": "FunctionDef name:add_update_values arguments arg:self arg:values Assign For Call call:items Assign Call call:get_field Assign BoolOp Assign If BoolOp Compare op:Eq Raise raises:FieldError('Composite primary key fields must be updated individually.') If BoolOp BoolOp Raise raises:FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field) If Compare op:IsNot Return return:yes" - }, - { - "library": "matplotlib", - "name": "ThetaAxis", - "source_code": "class ThetaAxis(maxis.XAxis): __name__ = 'thetaaxis' axis_name = 'theta' _tick_class = ThetaTick def _wrap_locator_formatter(self): self.set_major_locator(ThetaLocator(self.get_major_locator())) self.set_major_formatter(ThetaFormatter()) self.isDefault_majloc = True self.isDefault_majfmt = True def clear(self): super().clear() self.set_ticks_position('none') self._wrap_locator_formatter() def _set_scale(self, value, **kwargs): if value ! 
= 'linear': raise NotImplementedError('The xscale cannot be set on a polar plot') super()._set_scale(value, **kwargs) self.get_major_locator().set_params(steps = [1, 1.5, 3, 4.5, 9, 10]) self._wrap_locator_formatter() def _copy_tick_props(self, src, dest): if src is None or dest is None: return super()._copy_tick_props(src, dest) trans = dest._get_text1_transform()[0] dest.label1.set_transform(trans + dest._text1_translate) trans = dest._get_text2_transform()[0] dest.label2.set_transform(trans + dest._text2_translate)", - "docstring": "A theta Axis. This overrides certain properties of an to provide special-casing for an angular axis.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\projections\\polar.py", - "ast_data": "ClassDef name:ThetaAxis Assign Assign Assign FunctionDef name:_wrap_locator_formatter arguments arg:self Assign Assign FunctionDef name:clear arguments arg:self FunctionDef name:_set_scale arguments arg:self arg:value kwarg:kwargs If Compare op:NotEq Raise raises:NotImplementedError('The xscale cannot be set on a polar plot') FunctionDef name:_copy_tick_props arguments arg:self arg:src arg:dest If BoolOp Compare op:Is Compare op:Is Return return:no Assign Assign" - }, - { - "library": "flexx", - "name": "serving", - "source_code": "@property def serving(self): return self._serving", - "docstring": "Get a tuple (hostname, port) that is being served. Or None if the server is not serving (anymore).", - "type": "method", - "file_path": "flexx\\flexx\\app\\_server.py", - "ast_data": "FunctionDef name:serving arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "normalize_choices", - "source_code": "def normalize_choices(value, *, depth = 0): from django.db.models.enums import ChoicesType match value: case BaseChoiceIterator() | Promise() | bytes() | str(): return value case ChoicesType(): return value.choices case Mapping() if depth < 2: value = value.items() case Iterator() if depth < 2: pass case Iterable() if depth < 2 and (not any((isinstance(x, (Promise, bytes, str)) for x in value))): pass case Callable() if depth = = 0: return CallableChoiceIterator(value) case Callable() if depth < 2: value = value() case _: return value try: return [(k, normalize_choices(v, depth = depth + 1)) for k, v in value] except (TypeError, ValueError): return value", - "docstring": "Normalize choices values consistently for fields and widgets.", - "type": "function", - "file_path": "django\\django\\utils\\choices.py", - "ast_data": "FunctionDef name:normalize_choices arguments arg:value Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "scikit-learn", - "name": "check_convergence", - "source_code": "def check_convergence(self, X, y, sample_weight): if self.verbose: print(' Check Convergence') g_max_abs = np.max(np.abs(self.gradient)) check = g_max_abs < = self.tol if self.verbose: print(f' 1. max |gradient| {g_max_abs} < = {self.tol} {check}') if not check: return d2 = self.coef_newton @ self.hessian @ self.coef_newton check = 0.5 * d2 < = self.tol if self.verbose: print(f' 2. Newton decrement {0.5 * d2} < = {self.tol} {check}') if not check: return if self.verbose: loss_value = self.linear_loss.loss(coef = self.coef, X = X, y = y, sample_weight = sample_weight, l2_reg_strength = self.l2_reg_strength, n_threads = self.n_threads) print(f' Solver did converge at loss = {loss_value}.') self.converged = True", - "docstring": "Check for convergence. 
Sets self.converged.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\linear_model\\_glm\\_newton_solver.py", - "ast_data": "FunctionDef name:check_convergence arguments arg:self arg:X arg:y arg:sample_weight If Assign Call call:max Assign Compare op:LtE If If Return return:no Assign Assign Compare op:LtE If If Return return:no If Assign Call call:loss Assign" - }, - { - "library": "cherrypy", - "name": "HandlerWrapperTool", - "source_code": "class HandlerWrapperTool(Tool): def __init__(self, newhandler, point = 'before_handler', name = None, priority = 50): self.newhandler = newhandler self._point = point self._name = name self._priority = priority def callable(self, *args, **kwargs): innerfunc = cherrypy.serving.request.handler def wrap(*args, **kwargs): return self.newhandler(innerfunc, *args, **kwargs) cherrypy.serving.request.handler = wrap", - "docstring": "Tool which wraps request.handler in a provided wrapper function. The 'newhandler' arg must be a handler wrapper function that takes a 'next_handler' argument, plus ``. Like all page handler functions, it must return an iterable for use as cherrypy.response.body. For example, to allow your 'inner' page handlers to return dicts which then get interpolated into a template:: def interpolator(next_handler, *args, **kwargs): filename = cherrypy.request.config.get('template') cherrypy.response.template = env.get_template(filename) response_dict = next_handler(*args, **kwargs) return cherrypy.response.template.render(**response_dict) cherrypy.tools.jinja = HandlerWrapperTool(interpolator)", - "type": "class", - "file_path": "cherrypy\\cherrypy\\_cptools.py", - "ast_data": "ClassDef name:HandlerWrapperTool FunctionDef name:__init__ arguments arg:self arg:newhandler arg:point arg:name arg:priority Assign Assign Assign Assign FunctionDef name:callable arguments arg:self vararg:args kwarg:kwargs Assign FunctionDef name:wrap arguments vararg:args kwarg:kwargs Return return:yes Assign" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, kernel_name: str, runtime_arg_info: list['ArgInfo'], runtime_arg_values: list[Any]) -> None: super().__init__() self.kernel_name = kernel_name self.named_nodes: dict[str, IRNode] = {} self.runtime_arg_info = runtime_arg_info self.runtime_arg_values = runtime_arg_values", - "docstring": "Initializes a new instance of the ROCmTemplateKernel class. Args: kernel_name (str): The name of the kernel.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\rocm\\rocm_kernel.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:kernel_name type:str arg:runtime_arg_info type:list['ArgInfo'] arg:runtime_arg_values type:list[Any] Assign Assign Assign" - }, - { - "library": "authlib", - "name": "decrypt", - "source_code": "def decrypt(self, ciphertext, aad, iv, tag, key): self.check_iv(iv) chacha = Cryptodome_ChaCha20_Poly1305.new(key = key, nonce = iv) chacha.update(aad) return chacha.decrypt_and_verify(ciphertext, tag)", - "docstring": "Content Decryption with AEAD_XCHACHA20_POLY1305. 
:param ciphertext: ciphertext in bytes :param aad: additional authenticated data in bytes :param iv: initialization vector in bytes :param tag: authentication tag in bytes :param key: encrypted key in bytes :return: message", - "type": "method", - "file_path": "authlib\\authlib\\jose\\drafts\\_jwe_enc_cryptodome.py", - "ast_data": "FunctionDef name:decrypt arguments arg:self arg:ciphertext arg:aad arg:iv arg:tag arg:key Assign Call call:new Return return:yes" - }, - { - "library": "tensorflow", - "name": "CallContext", - "source_code": "class CallContext(object): def __init__(self): self.in_call = False self._state = {'layer': None, 'inputs': None, 'build_graph': False, 'training': None, 'saving': None} self._in_keras_graph = False def enter(self, layer, inputs, build_graph, training, saving = None): state = {'layer': layer, 'inputs': inputs, 'build_graph': build_graph, 'training': training, 'saving': saving} return CallContextManager(self, state) @property def layer(self): return self._state['layer'] @property def inputs(self): return self._state['inputs'] @property def build_graph(self): return self._state['build_graph'] @property def training(self): return self._state['training'] @property def saving(self): return self._state['saving'] @property def frozen(self): layer = self._state['layer'] if not layer: return False return not layer.trainable @property def in_keras_graph(self): if context.executing_eagerly(): return False return self._in_keras_graph or getattr(backend.get_graph(), 'name', None) = = 'keras_graph'", - "docstring": "Keeps track of properties currently inside a Layer/Model's . Attributes: in_call: Whether currently inside the of a Layer. layer: The whose is currently active. inputs: The inputs to the currently active . build_graph: Whether currently inside a Graph or FuncGraph. training: Whether currently executing in training or inference mode. saving: Whether currently saving to SavedModel. frozen: Whether currently executing inside a with set to . in_keras_graph: Whether executing inside the Keras Graph.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer_utils.py", - "ast_data": "ClassDef name:CallContext FunctionDef name:__init__ arguments arg:self Assign Assign Assign FunctionDef name:enter arguments arg:self arg:layer arg:inputs arg:build_graph arg:training arg:saving Assign Return return:yes FunctionDef name:layer arguments arg:self Return return:yes FunctionDef name:inputs arguments arg:self Return return:yes FunctionDef name:build_graph arguments arg:self Return return:yes FunctionDef name:training arguments arg:self Return return:yes FunctionDef name:saving arguments arg:self Return return:yes FunctionDef name:frozen arguments arg:self Assign If Return return:yes Return return:yes FunctionDef name:in_keras_graph arguments arg:self If Call call:executing_eagerly Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "__rpow__", - "source_code": "def __rpow__(self, other): return power(other, self)", - "docstring": "Raise other to the power self, masking the potential NaNs/Infs", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:__rpow__ arguments arg:self arg:other Return return:yes" - }, - { - "library": "tensorflow", - "name": "trace", - "source_code": "def trace(self, name = 'trace'): with self._name_scope(name): return self._trace()", - "docstring": "Trace of the linear operator, equal to sum of . 
If the operator is square, this is also the sum of the eigenvalues. Args: name: A name for this . Returns: Shape of same as .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", - "ast_data": "FunctionDef name:trace arguments arg:self arg:name With Return return:yes" - }, - { - "library": "matplotlib", - "name": "home", - "source_code": "def home(self, *args): self._nav_stack.home() self.set_history_buttons() self._update_view()", - "docstring": "Restore the original view. For convenience of being directly connected as a GUI callback, which often get passed additional parameters, this method accepts arbitrary parameters, but does not use them.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:home arguments arg:self vararg:args" - }, - { - "library": "tensorflow", - "name": "generate_keras_metadata", - "source_code": "def generate_keras_metadata(saved_nodes, node_paths): metadata = saved_metadata_pb2.SavedMetadata() for node_id, node in enumerate(saved_nodes): if isinstance(node, base_layer.Layer): path = node_paths[node] if not path: node_path = 'root' else: node_path = 'root.{}'.format('.'.join([ref.name for ref in path])) metadata.nodes.add(node_id = node_id, node_path = node_path, version = versions_pb2.VersionDef(producer = 1, min_consumer = 1, bad_consumers = []), identifier = node._object_identifier, metadata = node._tracking_metadata) return metadata", - "docstring": "Constructs a KerasMetadata proto with the metadata of each keras object.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\save.py", - "ast_data": "FunctionDef name:generate_keras_metadata arguments arg:saved_nodes arg:node_paths Assign Call call:SavedMetadata For Call call:enumerate If Call call:isinstance Assign If Assign Assign Call call:format Return return:yes" - }, - { - "library": "numpy", - "name": "fromarrays", - "source_code": "def fromarrays(arraylist, dtype = None, shape = None, formats = None, names = None, titles = None, aligned = False, byteorder = None, fill_value = None): datalist = [ma.getdata(x) for x in arraylist] masklist = [np.atleast_1d(ma.getmaskarray(x)) for x in arraylist] _array = np.rec.fromarrays(datalist, dtype = dtype, shape = shape, formats = formats, names = names, titles = titles, aligned = aligned, byteorder = byteorder).view(mrecarray) _array._mask.flat = list(zip(*masklist)) if fill_value is not None: _array.fill_value = fill_value return _array", - "docstring": "Creates a mrecarray from a (flat) list of masked arrays. Parameters ---------- arraylist : sequence A list of (masked) arrays. Each element of the sequence is first converted to a masked array if needed. If a 2D array is passed as argument, it is processed line by line dtype : {None, dtype}, optional Data type descriptor. shape : {None, integer}, optional Number of records. If None, shape is defined from the shape of the first array in the list. formats : {None, sequence}, optional Sequence of formats for each individual field. If None, the formats will be autodetected by inspecting the fields and selecting the highest dtype possible. names : {None, sequence}, optional Sequence of the names of each field. fill_value : {None, sequence}, optional Sequence of data to be used as filling values. 
Notes ----- Lists of tuples should be preferred over lists of lists for faster processing.", - "type": "function", - "file_path": "numpy\\numpy\\ma\\mrecords.py", - "ast_data": "FunctionDef name:fromarrays arguments arg:arraylist arg:dtype arg:shape arg:formats arg:names arg:titles arg:aligned arg:byteorder arg:fill_value Assign Assign Assign Call call:view Assign Call call:list If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "write", - "source_code": "def write(self, dataset): if not isinstance(dataset, data_types.DatasetV2): raise TypeError(f'Invalid `dataset.` Expected a `tf.data.Dataset` object but got {type(dataset)}.') if not dataset_ops.get_structure(dataset).is_compatible_with(tensor_spec.TensorSpec([], dtypes.string)): raise TypeError(f'Invalid `dataset`. Expected a`dataset` that produces scalar `tf.string` elements, but got a dataset which produces elements with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and types {dataset_ops.get_legacy_output_types(dataset)}.') dataset = dataset._apply_debug_options() return gen_experimental_dataset_ops.dataset_to_tf_record(dataset._variant_tensor, self._filename, self._compression_type)", - "docstring": "Writes a dataset to a TFRecord file. An operation that writes the content of the specified dataset to the file specified in the constructor. If the file exists, it will be overwritten. Args: dataset: a whose elements are to be written to a file Returns: In graph mode, this returns an operation which when executed performs the write. In eager mode, the write is performed by the method itself and there is no return value. Raises TypeError: if is not a . TypeError: if the elements produced by the dataset are not scalar strings.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\writers.py", - "ast_data": "FunctionDef name:write arguments arg:self arg:dataset If Raise raises:TypeError(f'Invalid `dataset.` Expected a `tf.data.Dataset` object but got {type(dataset)}.') If Raise raises:TypeError(f'Invalid `dataset`. Expected a`dataset` that produces scalar `tf.string` elements, but got a dataset which produces elements with shapes {dataset_ops.get_legacy_output_shapes(dataset)} and types {dataset_ops.get_legacy_output_types(dataset)}.') Assign Call call:_apply_debug_options Return return:yes" - }, - { - "library": "scipy", - "name": "is_pydata_spmatrix", - "source_code": "def is_pydata_spmatrix(m) -> bool: base_cls = getattr(sys.modules.get('sparse'), 'SparseArray', None) return base_cls is not None and isinstance(m, base_cls)", - "docstring": "Check whether object is pydata/sparse matrix, avoiding importing the module.", - "type": "function", - "file_path": "scipy\\scipy\\sparse\\_sputils.py", - "ast_data": "FunctionDef name:is_pydata_spmatrix arguments arg:m Assign Call call:getattr Return return:yes" - }, - { - "library": "django", - "name": "blankout", - "source_code": "def blankout(src, char): return dot_re.sub(char, src)", - "docstring": "Change every non-whitespace character to the given char. 
Used in the templatize function.", - "type": "function", - "file_path": "django\\django\\utils\\translation\\template.py", - "ast_data": "FunctionDef name:blankout arguments arg:src arg:char Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict_proba", - "source_code": "def predict_proba(self, raw_prediction): if raw_prediction.ndim = = 2 and raw_prediction.shape[1] = = 1: raw_prediction = raw_prediction.squeeze(1) proba = np.empty((raw_prediction.shape[0], 2), dtype = raw_prediction.dtype) proba[:, 1] = self.link.inverse(raw_prediction) proba[:, 0] = 1 - proba[:, 1] return proba", - "docstring": "Predict probabilities. Parameters ---------- raw_prediction : array of shape (n_samples,) or (n_samples, 1) Raw prediction values (in link space). Returns ------- proba : array of shape (n_samples, 2) Element-wise class probabilities.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", - "ast_data": "FunctionDef name:predict_proba arguments arg:self arg:raw_prediction If BoolOp Compare op:Eq Compare op:Eq Assign Call call:squeeze Assign Call call:empty Assign Call call:inverse Assign Return return:yes" - }, - { - "library": "numpy", - "name": "hermmul", - "source_code": "def hermmul(c1, c2): [c1, c2] = pu.as_series([c1, c2]) if len(c1) > len(c2): c = c2 xs = c1 else: c = c1 xs = c2 if len(c) = = 1: c0 = c[0] * xs c1 = 0 elif len(c) = = 2: c0 = c[0] * xs c1 = c[1] * xs else: nd = len(c) c0 = c[-2] * xs c1 = c[-1] * xs for i in range(3, len(c) + 1): tmp = c0 nd = nd - 1 c0 = hermsub(c[-i] * xs, c1 * (2 * (nd - 1))) c1 = hermadd(tmp, hermmulx(c1) * 2) return hermadd(c0, hermmulx(c1) * 2)", - "docstring": "Multiply one Hermite series by another. Returns the product of two Hermite series * . The arguments are sequences of coefficients, from lowest order \"term\" to highest, e.g., [1,2,3] represents the series ``. Parameters ---------- c1, c2 : array_like 1-D arrays of Hermite series coefficients ordered from low to high. Returns ------- out : ndarray Of Hermite series coefficients representing their product. See Also -------- hermadd, hermsub, hermmulx, hermdiv, hermpow Notes ----- In general, the (polynomial) product of two C-series results in terms that are not in the Hermite polynomial basis set. Thus, to express the product as a Hermite series, it is necessary to \"reproject\" the product onto said basis set, which may produce \"unintuitive\" (but correct) results; see Examples section below. 
Examples -------- >>> from numpy.polynomial.hermite import hermmul >>> hermmul([1, 2, 3], [0, 1, 2]) array([52., 29., 52., 7., 6.])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\hermite.py", - "ast_data": "FunctionDef name:hermmul arguments arg:c1 arg:c2 Assign Call call:as_series If Compare op:Gt Assign Assign Assign Assign If Compare op:Eq Assign Assign If Compare op:Eq Assign Assign Assign Call call:len Assign Assign For Call call:range Assign Assign Assign Call call:hermsub Assign Call call:hermadd Return return:yes" - }, - { - "library": "tensorflow", - "name": "sparse_retain", - "source_code": "@tf_export('sparse.retain', v1 = ['sparse.retain', 'sparse_retain']) @deprecation.deprecated_endpoints('sparse_retain') def sparse_retain(sp_input, to_retain): sp_input = _convert_to_sparse_tensor(sp_input) to_retain = ops.convert_to_tensor(to_retain) retain_shape = to_retain.get_shape() retain_shape.assert_has_rank(1) if sp_input.values.get_shape().dims is not None: sp_input.values.get_shape().dims[0].assert_is_compatible_with(tensor_shape.dimension_at_index(retain_shape, 0)) where_true = array_ops.reshape(array_ops.where_v2(to_retain), [-1]) new_indices = array_ops.gather(sp_input.indices, where_true) new_values = array_ops.gather(sp_input.values, where_true) return sparse_tensor.SparseTensor(new_indices, new_values, array_ops.identity(sp_input.dense_shape))", - "docstring": "Retains specified non-empty values within a . For example, if has shape and 4 non-empty string values: [0, 1]: a [0, 3]: b [2, 0]: c [3, 1]: d and , then the output will be a of shape with 2 non-empty values: [0, 1]: a [3, 1]: d Args: sp_input: The input with non-empty elements. to_retain: A bool vector of length with true values. Returns: A with the same shape as the input and non-empty elements corresponding to the true positions in . Raises: TypeError: If is not a .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", - "ast_data": "FunctionDef name:sparse_retain arguments arg:sp_input arg:to_retain Call call:tf_export Call call:deprecated_endpoints Assign Call call:_convert_to_sparse_tensor Assign Call call:convert_to_tensor Assign Call call:get_shape If Compare op:IsNot Assign Call call:reshape Assign Call call:gather Assign Call call:gather Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_cb_parent_spans", - "source_code": "def get_cb_parent_spans(cbax): rowstart = np.inf rowstop = -np.inf colstart = np.inf colstop = -np.inf for parent in cbax._colorbar_info['parents']: ss = parent.get_subplotspec() rowstart = min(ss.rowspan.start, rowstart) rowstop = max(ss.rowspan.stop, rowstop) colstart = min(ss.colspan.start, colstart) colstop = max(ss.colspan.stop, colstop) rowspan = range(rowstart, rowstop) colspan = range(colstart, colstop) return (rowspan, colspan)", - "docstring": "Figure out which subplotspecs this colorbar belongs to. 
Parameters ---------- cbax : Axes for the colorbar.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\_constrained_layout.py", - "ast_data": "FunctionDef name:get_cb_parent_spans arguments arg:cbax Assign Assign Assign Assign For Assign Call call:get_subplotspec Assign Call call:min Assign Call call:max Assign Call call:min Assign Call call:max Assign Call call:range Assign Call call:range Return return:yes" - }, - { - "library": "sphinx", - "name": "stable_hash", - "source_code": "def stable_hash(obj: Any) -> str: if isinstance(obj, dict): obj = sorted(map(stable_hash, obj.items())) if isinstance(obj, list | tuple | set | frozenset): obj = sorted(map(stable_hash, obj)) elif isinstance(obj, type | types.FunctionType): obj = f'{obj.__module__}.{obj.__qualname__}' return hashlib.md5(str(obj).encode(), usedforsecurity = False).hexdigest()", - "docstring": "Return a stable hash for a Python data structure. We can't just use the md5 of str(obj) as the order of collections may be random.", - "type": "function", - "file_path": "sphinx\\sphinx\\util\\_serialise.py", - "ast_data": "FunctionDef name:stable_hash arguments arg:obj type:Any If Call call:isinstance Assign Call call:sorted If Call call:isinstance Assign Call call:sorted If Call call:isinstance Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, model_content, custom_op_registerers_by_name = None, custom_op_registerers_by_func = None): if not model_content: raise ValueError('`model_content` must be specified.') if custom_op_registerers_by_name is None: custom_op_registerers_by_name = [] if custom_op_registerers_by_func is None: custom_op_registerers_by_func = [] try: self._calibrator = _calibration_wrapper.CalibrationWrapper(model_content, custom_op_registerers_by_name, custom_op_registerers_by_func) self._model_content = model_content except Exception as e: raise ValueError('Failed to parse the model: %s.' % e) if not self._calibrator: raise ValueError('Failed to parse the model.') self._interpreter = None", - "docstring": "Constructor. Args: model_content: Content of a TF-Lite Flatbuffer file. custom_op_registerers_by_name: List of str (symbol names) that take a pointer to a MutableOpResolver and register custom ops. custom_op_registerers_by_func: List of functions that take a pointer to a MutableOpResolver and register custom ops. Raises: ValueError: If the calibrator was unable to open the model.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\optimize\\calibrator.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:model_content arg:custom_op_registerers_by_name arg:custom_op_registerers_by_func If Raise raises:ValueError('`model_content` must be specified.') If Compare op:Is Assign If Compare op:Is Assign Try Assign Call call:CalibrationWrapper Assign ExceptHandler Raise raises:ValueError('Failed to parse the model: %s.' % e) If Raise raises:ValueError('Failed to parse the model.') Assign" - }, - { - "library": "cherrypy", - "name": "check_app_config_brackets", - "source_code": "def check_app_config_brackets(self): for sn, app in cherrypy.tree.apps.items(): if not isinstance(app, cherrypy.Application): continue if not app.config: continue for key in app.config.keys(): if key.startswith('[') or key.endswith(']'): warnings.warn('The application mounted at %r has config section names with extraneous brackets: %r. Config *files* need brackets; config *dicts* (e.g. passed to tree.mount) do not.' 
% (sn, key))", - "docstring": "Check for App config with extraneous brackets in section names.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpchecker.py", - "ast_data": "FunctionDef name:check_app_config_brackets arguments arg:self For Call call:items If If For Call call:keys If BoolOp Call call:startswith Call call:endswith" - }, - { - "library": "scipy", - "name": "Shell", - "source_code": "@cli.cls_cmd('shell') class Shell(Python): ctx = CONTEXT pythonpath = Python.pythonpath extra_argv = Python.extra_argv @classmethod def run(cls, pythonpath, extra_argv, **kwargs): cls._setup(pythonpath, **kwargs) shell = os.environ.get('SHELL', 'sh') click.echo(f\"Spawning a Unix shell '{shell}' ...\") os.execv(shell, [shell] + list(extra_argv)) sys.exit(1)", - "docstring": ":wrench: Start Unix shell with PYTHONPATH set. Running is equivalent to: 1. Execute build command (skip by passing the global option). 2. Open a new shell. 3. Set the PYTHONPATH environment variable in shell (query with ).", - "type": "class", - "file_path": "scipy\\dev.py", - "ast_data": "ClassDef name:Shell Call call:cls_cmd Assign Assign Assign FunctionDef name:run arguments arg:cls arg:pythonpath arg:extra_argv kwarg:kwargs Assign Call call:get" - }, - { - "library": "pandas", - "name": "__contains__", - "source_code": "def __contains__(self, key: str) -> bool: node = self.get_node(key) if node is not None: name = node._v_pathname if key in (name, name[1:]): return True return False", - "docstring": "check for existence of this key can match the exact pathname or the pathnm w/o the leading '/'", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:__contains__ arguments arg:self arg:key type:str Assign Call call:get_node If Compare op:IsNot Assign If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "render_pep440_pre", - "source_code": "def render_pep440_pre(pieces): if pieces['closest-tag']: if pieces['distance']: tag_version, post_version = pep440_split_post(pieces['closest-tag']) rendered = tag_version if post_version is not None: rendered + = f'.post{post_version + 1}.dev{pieces['distance']}' else: rendered + = f'.post0.dev{pieces['distance']}' else: rendered = pieces['closest-tag'] else: rendered = f'0.post0.dev{pieces['distance']}' return rendered", - "docstring": "TAG[.postN.devDISTANCE] -- No -dirty. Exceptions: 1: no tags. 0.post0.devDISTANCE", - "type": "function", - "file_path": "pandas\\pandas\\_version.py", - "ast_data": "FunctionDef name:render_pep440_pre arguments arg:pieces If If Assign Call call:pep440_split_post Assign If Compare op:IsNot Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "KORNIA_CHECK_SAME_DEVICE", - "source_code": "def KORNIA_CHECK_SAME_DEVICE(x: Tensor, y: Tensor, raises: bool = True) -> bool: if x.device ! = y.device: if raises: raise TypeError(f'Not same device for tensors. Got: {x.device} and {y.device}') return False return True", - "docstring": "Check whether two tensor in the same device. Args: x: first tensor to evaluate. y: sencod tensor to evaluate. msg: message to show in the exception. raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the two tensors are not in the same device and raises is True. 
Example: >>> x1 = torch.rand(2, 3, 3) >>> x2 = torch.rand(1, 3, 1) >>> KORNIA_CHECK_SAME_DEVICE(x1, x2) True", - "type": "function", - "file_path": "kornia\\kornia\\core\\check.py", - "ast_data": "FunctionDef name:KORNIA_CHECK_SAME_DEVICE arguments arg:x type:Tensor arg:y type:Tensor arg:raises type:bool If Compare op:NotEq If Raise raises:TypeError(f'Not same device for tensors. Got: {x.device} and {y.device}') Return return:yes Return return:yes" - }, - { - "library": "salmon", - "name": "get_config", - "source_code": "def get_config() -> VersioneerConfig: cfg = VersioneerConfig() cfg.VCS = 'git' cfg.style = 'pep440' cfg.tag_prefix = '' cfg.parentdir_prefix = 'salmon-' cfg.versionfile_source = 'salmon/_version.py' cfg.verbose = False return cfg", - "docstring": "Create, populate and return the VersioneerConfig() object.", - "type": "function", - "file_path": "salmon\\salmon\\_version.py", - "ast_data": "FunctionDef name:get_config arguments Assign Call call:VersioneerConfig Assign Assign Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "internal_convert_n_to_tensor_or_indexed_slices", - "source_code": "def internal_convert_n_to_tensor_or_indexed_slices(values, dtype = None, name = None, as_ref = False): if not isinstance(values, collections_abc.Iterable): raise TypeError('Argument `values` must be iterable.') ret = [] for i, value in enumerate(values): if value is None: ret.append(value) else: n = None if name is None else '%s_%d' % (name, i) ret.append(internal_convert_to_tensor_or_indexed_slices(value, dtype = dtype, name = n, as_ref = as_ref)) return ret", - "docstring": "Converts to a list of or objects. Any or objects in are returned unmodified. Args: values: An iterable of , , , or objects that can be consumed by . dtype: (Optional.) The required of the returned or . name: (Optional.) A name prefix to used when a new is created, in which case element will be given the name . as_ref: True if the caller wants the results as ref tensors. Returns: A list of , , and/or objects. Raises: TypeError: If no conversion function is registered for an element in . RuntimeError: If a registered conversion function returns an invalid value.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", - "ast_data": "FunctionDef name:internal_convert_n_to_tensor_or_indexed_slices arguments arg:values arg:dtype arg:name arg:as_ref If Raise raises:TypeError('Argument `values` must be iterable.') Assign For Call call:enumerate If Compare op:Is Assign Return return:yes" - }, - { - "library": "kornia", - "name": "q", - "source_code": "@property def q(self) -> Quaternion: return self._q", - "docstring": "Return the underlying data with shape :math:.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\so3.py", - "ast_data": "FunctionDef name:q arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_step_message", - "source_code": "def get_step_message(log, start, end, title, message, details): if end not in log: return '' res = f'-----------------------------------------------\\n### {title}\\n\\n{message}\\n\\n' if details: res + = '
<details>\\n\\n```\\n' + log[log.find(start) + len(start) + 1: log.find(end) - 1] + '\\n```\\n\\n</details>
\\n\\n' return res", - "docstring": "Get the message for a specific test. Parameters ---------- log : str The log of the linting job. start : str The string that marks the start of the test. end : str The string that marks the end of the test. title : str The title for this section. message : str The message to be added at the beginning of the section. details : bool Whether to add the details of each step. Returns ------- message : str The message to be added to the comment.", - "type": "function", - "file_path": "scikit-learn\\build_tools\\get_comment.py", - "ast_data": "FunctionDef name:get_step_message arguments arg:log arg:start arg:end arg:title arg:message arg:details If Compare op:NotIn Return return:yes Assign If Return return:yes" - }, - { - "library": "numpy", - "name": "unique_inverse", - "source_code": "@array_function_dispatch(_unique_inverse_dispatcher) def unique_inverse(x): result = unique(x, return_index = False, return_inverse = True, return_counts = False, equal_nan = False) return UniqueInverseResult(*result)", - "docstring": "Find the unique elements of and indices to reconstruct . This function is an Array API compatible alternative to:: np.unique(x, return_inverse=True, equal_nan=False, sorted=False) but returns a namedtuple for easier access to each output. .. note:: This function currently always returns a sorted result, however, this could change in any NumPy minor release. Parameters ---------- x : array_like Input array. It will be flattened if it is not already 1-D. Returns ------- out : namedtuple The result containing: * values - The unique elements of an input array. * inverse_indices - The indices from the set of unique elements that reconstruct . See Also -------- unique : Find the unique elements of an array. Examples -------- >>> import numpy as np >>> x = [1, 1, 2] >>> uniq = np.unique_inverse(x) >>> uniq.values array([1, 2]) >>> uniq.inverse_indices array([0, 0, 1])", - "type": "function", - "file_path": "numpy\\numpy\\lib\\_arraysetops_impl.py", - "ast_data": "FunctionDef name:unique_inverse arguments arg:x Call call:array_function_dispatch Assign Call call:unique Return return:yes" - }, - { - "library": "matplotlib", - "name": "integrate", - "source_code": "def integrate(x0, y0, broken_streamlines = True, integration_max_step_scale = 1.0, integration_max_error_scale = 1.0): stotal, xy_traj = (0.0, []) try: dmap.start_trajectory(x0, y0, broken_streamlines) except InvalidIndexError: return None if integration_direction in ['both', 'backward']: s, xyt = _integrate_rk12(x0, y0, dmap, backward_time, maxlength, broken_streamlines, integration_max_step_scale, integration_max_error_scale) stotal + = s xy_traj + = xyt[: : -1] if integration_direction in ['both', 'forward']: dmap.reset_start_point(x0, y0) s, xyt = _integrate_rk12(x0, y0, dmap, forward_time, maxlength, broken_streamlines, integration_max_step_scale, integration_max_error_scale) stotal + = s xy_traj + = xyt[1:] if stotal > minlength: return np.broadcast_arrays(xy_traj, np.empty((1, 2)))[0] else: dmap.undo_trajectory() return None", - "docstring": "Return x, y grid-coordinates of trajectory based on starting point. Integrate both forward and backward in time from starting point in grid coordinates. Integration is terminated when a trajectory reaches a domain boundary or when it crosses into an already occupied cell in the StreamMask. 
The resulting trajectory is None if it is shorter than .", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\streamplot.py", - "ast_data": "FunctionDef name:integrate arguments arg:x0 arg:y0 arg:broken_streamlines arg:integration_max_step_scale arg:integration_max_error_scale Assign Try ExceptHandler Return return:yes If Compare op:In Assign Call call:_integrate_rk12 If Compare op:In Assign Call call:_integrate_rk12 If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "InternalError", - "source_code": "@tf_export('errors.InternalError') class InternalError(OpError): def __init__(self, node_def, op, message, *args): super(InternalError, self).__init__(node_def, op, message, INTERNAL, *args)", - "docstring": "Raised when the system experiences an internal error. This exception is raised when some invariant expected by the runtime has been broken. Catching this exception is not recommended.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", - "ast_data": "ClassDef name:InternalError Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:node_def arg:op arg:message vararg:args" - }, - { - "library": "numpy", - "name": "place", - "source_code": "@array_function_dispatch(_place_dispatcher) def place(arr, mask, vals): return _place(arr, mask, vals)", - "docstring": "Change elements of an array based on conditional and input values. Similar to `placevalsmaskcopytomaskextractplaceaamaskvalsa` are to be masked, this sequence must be non-empty. See Also -------- copyto, put, take, extract Examples -------- >>> import numpy as np >>> arr = np.arange(6).reshape(2, 3) >>> np.place(arr, arr>2, [44, 55]) >>> arr array([[ 0, 1, 2], [44, 55, 44]])", - "type": "function", - "file_path": "numpy\\numpy\\lib\\_function_base_impl.py", - "ast_data": "FunctionDef name:place arguments arg:arr arg:mask arg:vals Call call:array_function_dispatch Return return:yes" - }, - { - "library": "tensorflow", - "name": "row_starts", - "source_code": "def row_starts(self): return self._row_splits[: -1]", - "docstring": "Returns the start indices for rows in this row partition. These indices specify where the values for each row begin. is equal to . Returns: A 1-D integer Tensor with shape . The returned tensor is nonnegative, and is sorted in ascending order. . .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", - "ast_data": "FunctionDef name:row_starts arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "get_type_analyze_hook", - "source_code": "def get_type_analyze_hook(self, fullname: str) -> _HookFunc | None: if fullname in _PRECISION_DICT: return _hook return None", - "docstring": "Set the precision of platform-specific subclasses. 
For example: , and .", - "type": "method", - "file_path": "numpy\\numpy\\typing\\mypy_plugin.py", - "ast_data": "FunctionDef name:get_type_analyze_hook arguments arg:self arg:fullname type:str If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "Price03", - "source_code": "class Price03(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N)) self.custom_bounds = ([0, 2], [0, 2]) self.global_optimum = [[1.0, 1.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return 100 * (x[1] - x[0] ** 2) ** 2 + (6.4 * (x[1] - 0.5) ** 2 - x[0] - 0.6) ** 2", - "docstring": "Price 3 objective function. This class defines the Price 3 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Price03}}(x) = 100(x_2 - x_1^2)^2 + \\left[6.4(x_2 - 0.5)^2 - x_1 - 0.6 \\right]^2 with :math: for :math:. *Global optimum*: :math: for :math:, :math:, :math:, :math:. .. [1] Price, W. A controlled random search procedure for global optimisation Computer Journal, 1977, 20, 367-370 TODO Jamil #96 has an erroneous factor of 6 in front of the square brackets", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_P.py", - "ast_data": "ClassDef name:Price03 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "pytorch", - "name": "append", - "source_code": "def append(self, module: Module) -> Self: self.add_module(str(len(self)), module) return self", - "docstring": "Append a given module to the end. Args: module (nn.Module): module to append Example:: >>> import torch.nn as nn >>> n = nn.Sequential(nn.Linear(1, 2), nn.Linear(2, 3)) >>> n.append(nn.Linear(3, 4)) Sequential( (0): Linear(in_features=1, out_features=2, bias=True) (1): Linear(in_features=2, out_features=3, bias=True) (2): Linear(in_features=3, out_features=4, bias=True) )", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\container.py", - "ast_data": "FunctionDef name:append arguments arg:self arg:module type:Module Return return:yes" - }, - { - "library": "pandas", - "name": "versions_from_parentdir", - "source_code": "def versions_from_parentdir(parentdir_prefix, root, verbose): rootdirs = [] for _ in range(3): dirname = os.path.basename(root) if dirname.startswith(parentdir_prefix): return {'version': dirname[len(parentdir_prefix):], 'full-revisionid': None, 'dirty': False, 'error': None, 'date': None} rootdirs.append(root) root = os.path.dirname(root) if verbose: print(f'Tried directories {rootdirs!s} but none started with prefix {parentdir_prefix}') raise NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")", - "docstring": "Try to determine the version from the parent directory name. Source tarballs conventionally unpack into a directory that includes both the project name and a version string. 
We will also support searching up two directory levels for an appropriately named parent directory", - "type": "function", - "file_path": "pandas\\pandas\\_version.py", - "ast_data": "FunctionDef name:versions_from_parentdir arguments arg:parentdir_prefix arg:root arg:verbose Assign For Call call:range Assign Call call:basename If Call call:startswith Return return:yes Assign Call call:dirname If Raise raises:NotThisMethod(\"rootdir doesn't start with parentdir_prefix\")" - }, - { - "library": "django", - "name": "get_table_list", - "source_code": "def get_table_list(self, cursor): cursor.execute(\"\\n SELECT\\n user_tables.table_name, \\n 't', \\n user_tab_comments.comments\\n FROM user_tables\\n LEFT OUTER JOIN\\n user_tab_comments\\n ON user_tab_comments.table_name = user_tables.table_name\\n WHERE\\n NOT EXISTS (\\n SELECT 1\\n FROM user_mviews\\n WHERE user_mviews.mview_name = user_tables.table_name\\n)\\n UNION ALL\\n SELECT view_name, 'v', NULL FROM user_views\\n UNION ALL\\n SELECT mview_name, 'v', NULL FROM user_mviews\\n \") return [TableInfo(self.identifier_converter(row[0]), row[1], row[2]) for row in cursor.fetchall()]", - "docstring": "Return a list of table and view names in the current database.", - "type": "method", - "file_path": "django\\django\\db\\backends\\oracle\\introspection.py", - "ast_data": "FunctionDef name:get_table_list arguments arg:self arg:cursor Return return:yes" - }, - { - "library": "tensorflow", - "name": "generate_dequeue_op", - "source_code": "def generate_dequeue_op(self, tpu_device = 0): self.freeze() if self._generated_dequeue_op and (not ops.inside_function()): raise ValueError(\"Can't generate two dequeue Ops from the same queue\") self._generated_dequeue_op = True full_name = '%s/dequeue' % self._name sharded_shapes = [policy.get_unpartitioned_shape(policy.get_sharded_shape(shape)) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)] if tpu_device is not None: with ops.device(tpu_name_util.core(tpu_device)): dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes = self._tuple_types, shapes = sharded_shapes, name = full_name) else: dequeue_op = tpu_ops.infeed_dequeue_tuple(dtypes = self._tuple_types, shapes = sharded_shapes, name = full_name) if self._number_of_partitions < = 1: return dequeue_op partitions = [policy.get_unpartitioned_shape([1] * shape.ndims).as_list() for shape, policy in zip(self._tuple_shapes, self._sharding_policies)] return tag_sharding_attribute_for_dequeued_tensors(dequeue_op, partitions)", - "docstring": "Generates the device-side Op to dequeue a tuple from the queue. Implicitly freezes the queue configuration if it is not already frozen, which will raise errors if the shapes and types have not been fully specified. Args: tpu_device: The TPU device ordinal where the infeed instruction should be placed. If None, no explicit placement will be performed, and it is up to the user to call this API from within a proper TPU device scope. The XLA code will fail if the TPU dequeue instruction is not bound to any device. Returns: A list of Outputs corresponding to a shard of infeed dequeued into XLA, suitable for use within a replicated block. 
Raises: ValueError: if the types or shapes of the tuple elements have not been set; or if a dequeue op has already been generated.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", - "ast_data": "FunctionDef name:generate_dequeue_op arguments arg:self arg:tpu_device If BoolOp Raise raises:ValueError(\"Can't generate two dequeue Ops from the same queue\") Assign Assign Assign If Compare op:IsNot With Assign Call call:infeed_dequeue_tuple Assign Call call:infeed_dequeue_tuple If Compare op:LtE Return return:yes Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_active", - "source_code": "def set_active(self, index, state = None): if index not in range(len(self.labels)): raise ValueError(f'Invalid CheckButton index: {index}') _api.check_isinstance((bool, None), state = state) invisible = colors.to_rgba('none') facecolors = self._checks.get_facecolor() if state is None: state = colors.same_color(facecolors[index], invisible) facecolors[index] = self._active_check_colors[index] if state else invisible self._checks.set_facecolor(facecolors) if self.drawon: if self._useblit: if self._background is not None: self.canvas.restore_region(self._background) self.ax.draw_artist(self._checks) self.canvas.blit(self.ax.bbox) else: self.canvas.draw() if self.eventson: self._observers.process('clicked', self.labels[index].get_text())", - "docstring": "Modify the state of a check button by index. Callbacks will be triggered if :attr: is True. Parameters ---------- index : int Index of the check button to toggle. state : bool, optional If a boolean value, set the state explicitly. If no value is provided, the state is toggled. Raises ------ ValueError If *index* is invalid. TypeError If *state* is not boolean.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", - "ast_data": "FunctionDef name:set_active arguments arg:self arg:index arg:state If Compare op:NotIn Raise raises:ValueError(f'Invalid CheckButton index: {index}') Assign Call call:to_rgba Assign Call call:get_facecolor If Compare op:Is Assign Call call:same_color Assign If If If Compare op:IsNot If" - }, - { - "library": "scipy", - "name": "fac_psd", - "source_code": "@property def fac_psd(self) -> float: if self.scaling = = 'psd': return 1 if self._fac_psd is None: self._fac_psd = 1 / np.sqrt(sum(self.win.real ** 2 + self.win.imag ** 2) / self.T) return self._fac_psd", - "docstring": "Factor to multiply the STFT values by to scale each frequency slice to a power spectral density (PSD). It is 1 if attribute `scale_to`. See Also -------- fac_magnitude: Scaling factor for to a magnitude spectrum. scale_to: Scale window to obtain 'magnitude' or 'psd' scaling. scaling: Normalization applied to the window function. 
ShortTimeFFT: Class this property belongs to.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", - "ast_data": "FunctionDef name:fac_psd arguments arg:self If Compare op:Eq Return return:yes If Compare op:Is Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "trace_function", - "source_code": "def trace_function(args = None, kwargs = None, tracing_options = None): if not tracing_options: tracing_options = TracingOptions() args = args if args else () kwargs = kwargs if kwargs else {} if tracing_options.input_signature and (args or kwargs): bound_args = function_type_utils.bind_function_inputs(args, kwargs, tracing_options.polymorphic_type, tracing_options.default_values) args, kwargs = (bound_args.args, bound_args.kwargs) with tracing_options.lock or contextlib.nullcontext(): if tracing_options.input_signature and (not args) and (not kwargs): args = tracing_options.input_signature kwargs = {} concrete_function = _maybe_define_function(args, kwargs, tracing_options) if not tracing_options.bind_graph_to_function: concrete_function._garbage_collector.release() return concrete_function", - "docstring": "Returns a specialized to inputs and execution context. Compiles a Graph corresponding to the Python function logic and uses that to generate a differentiable ConcreteFunction. Args: args: inputs to specialize on. Can be concrete values (e.g. 1) or or . kwargs: keyword inputs to specialize on. Concrete values (e.g. 1) or or . tracing_options: TracingOptions for the tracing process.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\tracing_compilation.py", - "ast_data": "FunctionDef name:trace_function arguments arg:args arg:kwargs arg:tracing_options If Assign Call call:TracingOptions Assign Assign If BoolOp BoolOp Assign Call call:bind_function_inputs Assign With If BoolOp Assign Assign Assign Call call:_maybe_define_function If Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, sess, grpc_debug_server_addresses, thread_name_filter = None, send_traceback_and_source_code = True): def _gated_grpc_watch_fn(fetches, feeds): del fetches, feeds return framework.WatchOptions(debug_ops = ['DebugIdentity(gated_grpc = true)']) super().__init__(sess, grpc_debug_server_addresses, watch_fn = _gated_grpc_watch_fn, thread_name_filter = thread_name_filter) self._send_traceback_and_source_code = send_traceback_and_source_code self._sent_graph_version = -1 register_signal_handler()", - "docstring": "Constructor of TensorBoardDebugWrapperSession. Args: sess: The instance to be wrapped. grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a or a of s. E.g., \"localhost:2333\", \"grpc://localhost:2333\", [\"192.168.0.7:2333\", \"192.168.0.8:2333\"]. thread_name_filter: Optional filter for thread names. 
send_traceback_and_source_code: Whether traceback of graph elements and the source code are to be sent to the debug server(s).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\grpc_wrapper.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:sess arg:grpc_debug_server_addresses arg:thread_name_filter arg:send_traceback_and_source_code FunctionDef name:_gated_grpc_watch_fn arguments arg:fetches arg:feeds Return return:yes Assign Assign" - }, - { - "library": "pytorch", - "name": "del_tensors", - "source_code": "def del_tensors(self, names: Iterable[str]) -> None: for name in names: self.del_tensor(name)", - "docstring": "Delete the attributes specified by the given paths. For example, to delete the attributes mod.layer1.conv1.weight and mod.layer1.conv1.bias, use accessor.del_tensors([\"layer1.conv1.weight\", \"layer1.conv1.bias\"])", - "type": "method", - "file_path": "pytorch\\torch\\nn\\utils\\_named_member_accessor.py", - "ast_data": "FunctionDef name:del_tensors arguments arg:self arg:names type:Iterable[str] For" - }, - { - "library": "scikit-learn", - "name": "fit_predict", - "source_code": "def fit_predict(self, X, y = None): return super().fit_predict(X, y)", - "docstring": "Perform spectral clustering on and return cluster labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) or (n_samples, n_samples) Training instances to cluster, similarities / affinities between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\cluster\\_spectral.py", - "ast_data": "FunctionDef name:fit_predict arguments arg:self arg:X arg:y Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_edgecolor", - "source_code": "def set_edgecolor(self, color): self._original_edgecolor = color self._set_edgecolor(color)", - "docstring": "Set the patch edge color. Parameters ---------- color : :mpltype: or None", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_edgecolor arguments arg:self arg:color Assign" - }, - { - "library": "scipy", - "name": "time_count_neighbors_deep", - "source_code": "def time_count_neighbors_deep(self, mn1n2, Nr): self.T1d.count_neighbors(self.T2d, self.r)", - "docstring": "Count neighbors for a very deep kd-tree dim | # points T1 | # points T2 | Nr", - "type": "method", - "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", - "ast_data": "FunctionDef name:time_count_neighbors_deep arguments arg:self arg:mn1n2 arg:Nr" - }, - { - "library": "pytorch", - "name": "get_device_properties", - "source_code": "def get_device_properties(device: Optional[_device_t] = None) -> _CudaDeviceProperties: _lazy_init() device = _get_device_index(device, optional = True) if device < 0 or device > = device_count(): raise AssertionError('Invalid device id') return _get_device_properties(device)", - "docstring": "Get the properties of a device. Args: device (torch.device or int or str, optional): device for which to return the properties of the device. It uses the current device, given by :func:, if :attr: is `` (default). 
Returns: _CudaDeviceProperties: the properties of the device", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\__init__.py", - "ast_data": "FunctionDef name:get_device_properties arguments arg:device type:Optional[_device_t] Assign Call call:_get_device_index If BoolOp Compare op:Lt Compare op:GtE Raise raises:AssertionError('Invalid device id') Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, offset = (0, 0), spacing = 10.0, angle = 45.0, length = np.sqrt(2), **kwargs): super().__init__(offset) self._spacing = spacing self._angle = angle self._length = length self._gc = kwargs", - "docstring": "Parameters ---------- offset : (float, float), default: (0, 0) The (x, y) offset to apply to the path, in points. spacing : float, default: 10.0 The spacing between ticks in points. angle : float, default: 45.0 The angle between the path and the tick in degrees. The angle is measured as if you were an ant walking along the curve, with zero degrees pointing directly ahead, 90 to your left, -90 to your right, and 180 behind you. To change side of the ticks, change sign of the angle. length : float, default: 1.414 The length of the tick relative to spacing. Recommended length = 1.414 (sqrt(2)) when angle=45, length=1.0 when angle=90 and length=2.0 when angle=60. **kwargs Extra keywords are stored and passed through to :meth:. Examples -------- See :doc:.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patheffects.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:offset arg:spacing arg:angle arg:length kwarg:kwargs Assign Assign Assign Assign" - }, - { - "library": "matplotlib", - "name": "set_default_intervals", - "source_code": "def set_default_intervals(self): pass", - "docstring": "Set the default limits for the axis data and view interval if they have not been not mutated yet.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axis.py", - "ast_data": "FunctionDef name:set_default_intervals arguments arg:self" - }, - { - "library": "matplotlib", - "name": "rotate", - "source_code": "def rotate(self, theta): a = math.cos(theta) b = math.sin(theta) mtx = self._mtx (xx, xy, x0), (yx, yy, y0), _ = mtx.tolist() mtx[0, 0] = a * xx - b * yx mtx[0, 1] = a * xy - b * yy mtx[0, 2] = a * x0 - b * y0 mtx[1, 0] = b * xx + a * yx mtx[1, 1] = b * xy + a * yy mtx[1, 2] = b * x0 + a * y0 self.invalidate() return self", - "docstring": "Add a rotation (in radians) to this transform in place. Returns *self*, so this method can easily be chained with more calls to :meth:, :meth:, :meth: and :meth:.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:rotate arguments arg:self arg:theta Assign Call call:cos Assign Call call:sin Assign Assign Call call:tolist Assign Assign Assign Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "is_shared", - "source_code": "def is_shared(self): if has_torch_function_unary(self): return handle_torch_function(Tensor.is_shared, (self,), self) return self._typed_storage()._is_shared()", - "docstring": "Checks if tensor is in shared memory. 
This is always `` for CUDA tensors.", - "type": "method", - "file_path": "pytorch\\torch\\_tensor.py", - "ast_data": "FunctionDef name:is_shared arguments arg:self If Call call:has_torch_function_unary Return return:yes Return return:yes" - }, - { - "library": "pygame", - "name": "move_to_front", - "source_code": "def move_to_front(self, sprite): self.change_layer(sprite, self.get_top_layer())", - "docstring": "bring the sprite to front layer LayeredUpdates.move_to_front(sprite): return None Brings the sprite to front by changing the sprite layer to the top-most layer. The sprite is added at the end of the list of sprites in that top-most layer.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:move_to_front arguments arg:self arg:sprite" - }, - { - "library": "tensorflow", - "name": "broadcast_tensor", - "source_code": "def broadcast_tensor(self, tensor): return array_ops.gather(tensor, self.gather_index)", - "docstring": "Broadcast from a dense tensor. It is assumed that the first axis of the dense tensor is indexed by the source shape, and at the end, the first axis of the dense tensor is indexed by the destination shape. Args: tensor: a dense tensor. Returns: A dense tensor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", - "ast_data": "FunctionDef name:broadcast_tensor arguments arg:self arg:tensor Return return:yes" - }, - { - "library": "mongo", - "name": "download_to_stream", - "source_code": "@_csot.apply async def download_to_stream(self, file_id: Any, destination: Any, session: Optional[AsyncClientSession] = None) -> None: async with await self.open_download_stream(file_id, session = session) as gout: while True: chunk = await gout.readchunk() if not len(chunk): break destination.write(chunk)", - "docstring": "Downloads the contents of the stored file specified by file_id and writes the contents to . For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) # Get _id of file to read file_id = fs.upload_from_stream(\"test_file\", \"data I want to store!\") # Get file to write to file = open('myfile','wb+') fs.download_to_stream(file_id, file) file.seek(0) contents = file.read() Raises :exc: if no file with file_id exists. :param file_id: The _id of the file to be downloaded. :param destination: a file-like object implementing :meth:. :param session: a :class: .. 
versionchanged:: 3.6 Added `` parameter.", - "type": "method", - "file_path": "mongo\\gridfs\\asynchronous\\grid_file.py", - "ast_data": "AsyncFunctionDef name:download_to_stream arguments arg:self arg:file_id type:Any arg:destination type:Any arg:session type:Optional[AsyncClientSession]" - }, - { - "library": "django", - "name": "has_key", - "source_code": "def has_key(self, key, version = None): return self.get(key, self._missing_key, version = version) is not self._missing_key", - "docstring": "Return True if the key is in the cache and has not expired.", - "type": "method", - "file_path": "django\\django\\core\\cache\\backends\\base.py", - "ast_data": "FunctionDef name:has_key arguments arg:self arg:key arg:version Return return:yes" - }, - { - "library": "django", - "name": "get_rollback", - "source_code": "def get_rollback(using = None): return get_connection(using).get_rollback()", - "docstring": "Get the \"needs rollback\" flag -- for *advanced use* only.", - "type": "function", - "file_path": "django\\django\\db\\transaction.py", - "ast_data": "FunctionDef name:get_rollback arguments arg:using Return return:yes" - }, - { - "library": "matplotlib", - "name": "index", - "source_code": "@property def index(self): return self.font._index_dvi_to_freetype(self.glyph)", - "docstring": "The FreeType index of this glyph (that can be passed to FT_Load_Glyph).", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\dviread.py", - "ast_data": "FunctionDef name:index arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "valid_index_tensor", - "source_code": "def valid_index_tensor(index, dims): slice_count = 0 for s in index: if isinstance(s, slice): slice_count + = 1 if slice_count > len(dims): return F() else: return T()", - "docstring": "if the slice instances exceed the length of the dimensions then this is a type error so we return False", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:valid_index_tensor arguments arg:index arg:dims Assign For If Call call:isinstance If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "set_codes", - "source_code": "def set_codes(self, codes, *, level = None, verify_integrity: bool = True) -> MultiIndex: level, codes = _require_listlike(level, codes, 'Codes') idx = self._view() idx._reset_identity() idx._set_codes(codes, level = level, verify_integrity = verify_integrity) return idx", - "docstring": "Set new codes on MultiIndex. Defaults to returning new index. Parameters ---------- codes : sequence or list of sequence New codes to apply. level : int, level name, or sequence of int/level names (default None) Level(s) to set (None for all levels). verify_integrity : bool, default True If True, checks that levels and codes are compatible. Returns ------- new index (of same type and class...etc) or None The same type as the caller or None if ``. See Also -------- MultiIndex.set_levels : Set new levels on MultiIndex. MultiIndex.codes : Get the codes of the levels in the MultiIndex. MultiIndex.levels : Get the levels of the MultiIndex. Examples -------- >>> idx = pd.MultiIndex.from_tuples( ... [(1, \"one\"), (1, \"two\"), (2, \"one\"), (2, \"two\")], names=[\"foo\", \"bar\"] ... 
) >>> idx MultiIndex([(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]]) MultiIndex([(2, 'one'), (1, 'one'), (2, 'two'), (1, 'two')], names=['foo', 'bar']) >>> idx.set_codes([1, 0, 1, 0], level=0) MultiIndex([(2, 'one'), (1, 'two'), (2, 'one'), (1, 'two')], names=['foo', 'bar']) >>> idx.set_codes([0, 0, 1, 1], level=\"bar\") MultiIndex([(1, 'one'), (1, 'one'), (2, 'two'), (2, 'two')], names=['foo', 'bar']) >>> idx.set_codes([[1, 0, 1, 0], [0, 0, 1, 1]], level=[0, 1]) MultiIndex([(2, 'one'), (1, 'one'), (2, 'two'), (1, 'two')], names=['foo', 'bar'])", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\multi.py", - "ast_data": "FunctionDef name:set_codes arguments arg:self arg:codes Assign Call call:_require_listlike Assign Call call:_view Return return:yes" - }, - { - "library": "tensorflow", - "name": "tensor_not_equals", - "source_code": "@tf_export('__operators__.ne', v1 = []) @dispatch.add_dispatch_support def tensor_not_equals(self, other): if other is None: return True if tensor_lib.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions(): self, other = override_binary_operator.maybe_promote_tensors(self, other) return gen_math_ops.not_equal(self, other, incompatible_shape_error = False) else: return self is not other", - "docstring": "The operation invoked by the operator. Compares two tensors element-wise for inequality if they are broadcast-compatible; or returns True if they are not broadcast-compatible. (Note that this behavior differs from , which raises an exception if the two tensors are not broadcast-compatible.) Purpose in the API: This method is exposed in TensorFlow's API so that library developers can register dispatching for to allow it to handle custom composite tensors & other custom objects. The API symbol is not intended to be called by users directly and does appear in TensorFlow's generated documentation. Args: self: The left-hand side of the operator. other: The right-hand side of the operator. Returns: The result of the elementwise operation, or if the arguments are not broadcast-compatible.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\math_ops.py", - "ast_data": "FunctionDef name:tensor_not_equals arguments arg:self arg:other Call call:tf_export If Compare op:Is Return return:yes If BoolOp Call call:executing_eagerly_outside_functions Assign Call call:maybe_promote_tensors Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "tricontourf", - "source_code": "@_docstring.Substitution(func = 'tricontourf', type = 'regions') @_docstring.interpd def tricontourf(ax, *args, **kwargs): kwargs['filled'] = True return TriContourSet(ax, *args, **kwargs)", - "docstring": "%(_tricontour_doc)s hatches : list[str], optional A list of crosshatch patterns to use on the filled areas. If None, no hatching will be added to the contour. Notes ----- fills intervals that are closed at the top; that is, for boundaries *z1* and *z2*, the filled region is:: z1 < Z <= z2 except for the lowest interval, which is closed on both sides (i.e. 
it includes the lowest value).", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py", - "ast_data": "FunctionDef name:tricontourf arguments arg:ax vararg:args kwarg:kwargs Call call:Substitution Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "restore_region", - "source_code": "def restore_region(self, region, bbox = None, xy = None): if bbox is not None or xy is not None: if bbox is None: x1, y1, x2, y2 = region.get_extents() elif isinstance(bbox, BboxBase): x1, y1, x2, y2 = bbox.extents else: x1, y1, x2, y2 = bbox if xy is None: ox, oy = (x1, y1) else: ox, oy = xy self._renderer.restore_region(region, int(x1), int(y1), int(x2), int(y2), int(ox), int(oy)) else: self._renderer.restore_region(region)", - "docstring": "Restore the saved region. If bbox (instance of BboxBase, or its extents) is given, only the region specified by the bbox will be restored. *xy* (a pair of floats) optionally specifies the new position (the LLC of the original region, not the LLC of the bbox) where the region will be restored. >>> region = renderer.copy_from_bbox() >>> x1, y1, x2, y2 = region.get_extents() >>> renderer.restore_region(region, bbox=(x1+dx, y1, x2, y2), ... xy=(x1-dx, y1))", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_agg.py", - "ast_data": "FunctionDef name:restore_region arguments arg:self arg:region arg:bbox arg:xy If BoolOp Compare op:IsNot Compare op:IsNot If Compare op:Is Assign Call call:get_extents If Call call:isinstance Assign Assign If Compare op:Is Assign Assign" - }, - { - "library": "scikit-learn", - "name": "MethodMapping", - "source_code": "class MethodMapping: def __init__(self): self._routes = [] def __iter__(self): return iter(self._routes) def add(self, *, caller, callee): if caller not in METHODS: raise ValueError(f'Given caller: {caller} is not a valid method. Valid methods are: {METHODS}') if callee not in METHODS: raise ValueError(f'Given callee: {callee} is not a valid method. Valid methods are: {METHODS}') self._routes.append(MethodPair(caller = caller, callee = callee)) return self def _serialize(self): result = list() for route in self._routes: result.append({'caller': route.caller, 'callee': route.callee}) return result def __repr__(self): return str(self._serialize()) def __str__(self): return str(repr(self))", - "docstring": "Stores the mapping between caller and callee methods for a router. This class is primarily used in a `` instances. .. versionadded:: 1.3", - "type": "class", - "file_path": "scikit-learn\\sklearn\\utils\\_metadata_requests.py", - "ast_data": "ClassDef name:MethodMapping FunctionDef name:__init__ arguments arg:self Assign FunctionDef name:__iter__ arguments arg:self Return return:yes FunctionDef name:add arguments arg:self If Compare op:NotIn Raise raises:ValueError(f'Given caller:{caller} is not a valid method. Valid methods are: {METHODS}') If Compare op:NotIn Raise raises:ValueError(f'Given callee:{callee} is not a valid method. 
Valid methods are: {METHODS}') Return return:yes FunctionDef name:_serialize arguments arg:self Assign Call call:list For Return return:yes FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:__str__ arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "is_realized", - "source_code": "def is_realized(self): return True", - "docstring": "Used by LazyVariableTracker to indicate an unrealized node", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", - "ast_data": "FunctionDef name:is_realized arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "plot_examples", - "source_code": "def plot_examples(colormaps): np.random.seed(19680801) data = np.random.randn(30, 30) n = len(colormaps) fig, axs = plt.subplots(1, n, figsize = (n * 2 + 2, 3), layout = 'constrained', squeeze = False) for [ax, cmap] in zip(axs.flat, colormaps): psm = ax.pcolormesh(data, cmap = cmap, rasterized = True, vmin = -4, vmax = 4) fig.colorbar(psm, ax = ax) plt.show()", - "docstring": "Helper function to plot data with associated colormap.", - "type": "function", - "file_path": "matplotlib\\galleries\\users_explain\\colors\\colormap-manipulation.py", - "ast_data": "FunctionDef name:plot_examples arguments arg:colormaps Assign Call call:randn Assign Call call:len Assign Call call:subplots For Call call:zip Assign Call call:pcolormesh" - }, - { - "library": "tensorflow", - "name": "file_crc32", - "source_code": "def file_crc32(filename, block_size = _DEFAULT_BLOCK_SIZE): crc = 0 with FileIO(filename, mode = 'rb') as f: chunk = f.read(n = block_size) while chunk: crc = binascii.crc32(chunk, crc) chunk = f.read(n = block_size) return hex(crc & 4294967295)", - "docstring": "Get the crc32 of the passed file. The crc32 of a file can be used for error checking; two files with the same crc32 are considered equivalent. Note that the entire file must be read to produce the crc32. Args: filename: string, path to a file block_size: Integer, process the files by reading blocks of bytes. Use -1 to read the file as once. Returns: hexadecimal as string, the crc32 of the passed file.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:file_crc32 arguments arg:filename arg:block_size Assign With Assign Call call:read While Assign Call call:crc32 Assign Call call:read Return return:yes" - }, - { - "library": "virtualenv", - "name": "quote", - "source_code": "@staticmethod def quote(string): return shlex.quote(string)", - "docstring": "Quote strings in the activation script. :param string: the string to quote :return: quoted string that works in the activation script", - "type": "method", - "file_path": "virtualenv\\src\\virtualenv\\activation\\via_template.py", - "ast_data": "FunctionDef name:quote arguments arg:string Return return:yes" - }, - { - "library": "pytorch", - "name": "get_tracker_snapshot", - "source_code": "def get_tracker_snapshot(self, type: str = 'current') -> dict[torch.device, dict[str, int]]: if type = = 'current': return deepcopy(self._curr_mem_snap) elif type = = 'peak': return deepcopy(self._peak_mem_snap) else: raise ValueError(f'Invalid type {type}')", - "docstring": "Capture a snapshot of the memory usage breakdown per device, based on the specified type. Args: type (str): The type of snapshot to capture. Can be \"current\" for the current memory usage or \"peak\" for the peak memory usage. Defaults to \"current\". 
Returns: Dict[torch.device, Dict[str, int]]: A dictionary where each key is a torch.device, and each value is another dictionary. This inner dictionary has keys representing memory reference types as defined in `` and values representing the amount of memory consumed in bytes. Raises: ValueError: If an invalid type is specified.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_tools\\mem_tracker.py", - "ast_data": "FunctionDef name:get_tracker_snapshot arguments arg:self arg:type type:str If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Raise raises:ValueError(f'Invalid type {type}')" - }, - { - "library": "tensorflow", - "name": "size", - "source_code": "def size(self, name = None): with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]): return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)", - "docstring": "Compute the number of elements in this table. Args: name: A name for the operation (optional). Returns: A scalar tensor containing the number of elements in this table.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", - "ast_data": "FunctionDef name:size arguments arg:self arg:name With Return return:yes" - }, - { - "library": "tensorflow", - "name": "to_proto", - "source_code": "def to_proto(self, export_scope = None): if export_scope is None or self.name.startswith(export_scope): context_def = control_flow_pb2.CondContextDef() context_def.context_name = ops.strip_name_scope(self.name, export_scope) context_def.pred_name = ops.strip_name_scope(self._pred.name, export_scope) context_def.pivot_name = ops.strip_name_scope(self._pivot.name, export_scope) context_def.branch = self._branch context_def.values_def.MergeFrom(super(CondContext, self)._to_values_def(export_scope)) for nested in self._nested_contexts: nested_def = context_def.nested_contexts.add() nested.to_control_flow_context_def(nested_def) return context_def else: return None", - "docstring": "Converts a to a protocol buffer. Args: export_scope: Optional . Name scope to remove. 
Returns: A protocol buffer.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", - "ast_data": "FunctionDef name:to_proto arguments arg:self arg:export_scope If BoolOp Compare op:Is Call call:startswith Assign Call call:CondContextDef Assign Call call:strip_name_scope Assign Call call:strip_name_scope Assign Call call:strip_name_scope Assign For Assign Call call:add Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "sparse_series_to_coo", - "source_code": "def sparse_series_to_coo(ss: Series, row_levels: Iterable[int] = (0,), column_levels: Iterable[int] = (1,), sort_labels: bool = False) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]: import scipy.sparse if ss.index.nlevels < 2: raise ValueError('to_coo requires MultiIndex with nlevels > = 2.') if not ss.index.is_unique: raise ValueError('Duplicate index entries are not allowed in to_coo transformation.') row_levels = [ss.index._get_level_number(x) for x in row_levels] column_levels = [ss.index._get_level_number(x) for x in column_levels] v, i, j, rows, columns = _to_ijv(ss, row_levels = row_levels, column_levels = column_levels, sort_labels = sort_labels) sparse_matrix = scipy.sparse.coo_matrix((v, (i, j)), shape = (len(rows), len(columns))) return (sparse_matrix, rows, columns)", - "docstring": "Convert a sparse Series to a scipy.sparse.coo_matrix using index levels row_levels, column_levels as the row and column labels respectively. Returns the sparse_matrix, row and column labels.", - "type": "function", - "file_path": "pandas\\pandas\\core\\arrays\\sparse\\scipy_sparse.py", - "ast_data": "FunctionDef name:sparse_series_to_coo arguments arg:ss type:Series arg:row_levels type:Iterable[int] arg:column_levels type:Iterable[int] arg:sort_labels type:bool If Compare op:Lt Raise raises:ValueError('to_coo requires MultiIndex with nlevels >= 2.') If Raise raises:ValueError('Duplicate index entries are not allowed in to_coo transformation.') Assign Assign Assign Call call:_to_ijv Assign Call call:coo_matrix Return return:yes" - }, - { - "library": "pandas", - "name": "NumExprClobberingError", - "source_code": "class NumExprClobberingError(NameError): pass", - "docstring": "Exception raised when trying to use a built-in numexpr name as a variable name. `` will throw the error if the engine is set to 'numexpr'. 'numexpr' is the default engine value for these methods if the numexpr package is installed. See Also -------- eval : Evaluate a Python expression as a string using various backends. DataFrame.query : Query the columns of a DataFrame with a boolean expression. Examples -------- >>> df = pd.DataFrame({\"abs\": [1, 1, 1]}) >>> df.query(\"abs > 2\") # doctest: +SKIP ... # NumExprClobberingError: Variables in expression \"(abs) > (2)\" overlap... >>> sin, a = 1, 2 >>> pd.eval(\"sin + a\", engine=\"numexpr\") # doctest: +SKIP ... # NumExprClobberingError: Variables in expression \"(sin) + (a)\" overlap...", - "type": "class", - "file_path": "pandas\\pandas\\errors\\__init__.py", - "ast_data": "ClassDef name:NumExprClobberingError" - }, - { - "library": "kornia", - "name": "tensor_to_image", - "source_code": "def tensor_to_image(tensor: Tensor, keepdim: bool = False, force_contiguous: bool = False) -> Any: if not isinstance(tensor, Tensor): raise TypeError(f'Input type is not a Tensor. 
Got {type(tensor)}') if len(tensor.shape) > 4 or len(tensor.shape) < 2: raise ValueError('Input size must be a two, three or four dimensional tensor') input_shape = tensor.shape image = tensor.cpu().detach() if len(input_shape) = = 2: pass elif len(input_shape) = = 3: if input_shape[0] = = 1: image = image.squeeze() else: image = image.permute(1, 2, 0) elif len(input_shape) = = 4: image = image.permute(0, 2, 3, 1) if input_shape[0] = = 1 and (not keepdim): image = image.squeeze(0) if input_shape[1] = = 1: image = image.squeeze(-1) else: raise ValueError(f'Cannot process tensor with shape {input_shape}') if force_contiguous: image = image.contiguous() return image.numpy()", - "docstring": "Convert a PyTorch tensor image to a numpy image. In case the tensor is in the GPU, it will be copied back to CPU. Args: tensor: image of the form :math:, :math: or :math:. keepdim: If `(H, W, C)(H, W)contiguous(H, W)(H, W, C)(B, H, W, C)`. Example: >>> img = torch.ones(1, 3, 3) >>> tensor_to_image(img).shape (3, 3) >>> img = torch.ones(3, 4, 4) >>> tensor_to_image(img).shape (4, 4, 3)", - "type": "function", - "file_path": "kornia\\kornia\\utils\\image.py", - "ast_data": "FunctionDef name:tensor_to_image arguments arg:tensor type:Tensor arg:keepdim type:bool arg:force_contiguous type:bool If Raise raises:TypeError(f'Input type is not a Tensor. Got {type(tensor)}') If BoolOp Compare op:Gt Compare op:Lt Raise raises:ValueError('Input size must be a two, three or four dimensional tensor') Assign Assign Call call:detach If Compare op:Eq If Compare op:Eq If Compare op:Eq Assign Call call:squeeze Assign Call call:permute If Compare op:Eq Assign Call call:permute If BoolOp Compare op:Eq Assign Call call:squeeze If Compare op:Eq Assign Call call:squeeze Raise raises:ValueError(f'Cannot process tensor with shape {input_shape}') If Assign Call call:contiguous Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_pickradius", - "source_code": "def get_pickradius(self): return self._pickradius", - "docstring": "Return the pick radius used for containment tests. See for more details.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\lines.py", - "ast_data": "FunctionDef name:get_pickradius arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, parent, subplotspec, *, facecolor = None, edgecolor = None, linewidth = 0.0, frameon = None, **kwargs): super().__init__(**kwargs) if facecolor is None: facecolor = 'none' edgecolor = mpl._val_or_rc(edgecolor, 'figure.edgecolor') frameon = mpl._val_or_rc(frameon, 'figure.frameon') self._subplotspec = subplotspec self._parent = parent self._root_figure = parent._root_figure self._axstack = parent._axstack self.subplotpars = parent.subplotpars self.dpi_scale_trans = parent.dpi_scale_trans self._axobservers = parent._axobservers self.transFigure = parent.transFigure self.bbox_relative = Bbox.null() self._redo_transform_rel_fig() self.figbbox = self._parent.figbbox self.bbox = TransformedBbox(self.bbox_relative, self._parent.transSubfigure) self.transSubfigure = BboxTransformTo(self.bbox) self.patch = Rectangle(xy = (0, 0), width = 1, height = 1, visible = frameon, facecolor = facecolor, edgecolor = edgecolor, linewidth = linewidth, in_layout = False, transform = self.transSubfigure) self._set_artist_props(self.patch) self.patch.set_antialiased(False)", - "docstring": "Parameters ---------- parent : or Figure or subfigure that contains the SubFigure. 
SubFigures can be nested. subplotspec : Defines the region in a parent gridspec where the subfigure will be placed. facecolor : default: `figure.edgecolorfigure.frameon.SubFigure` properties, optional %(SubFigure:kwdoc)s", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:parent arg:subplotspec kwarg:kwargs If Compare op:Is Assign Assign Call call:_val_or_rc Assign Call call:_val_or_rc Assign Assign Assign Assign Assign Assign Assign Assign Assign Call call:null Assign Assign Call call:TransformedBbox Assign Call call:BboxTransformTo Assign Call call:Rectangle" - }, - { - "library": "mongo", - "name": "unpack_response", - "source_code": "def unpack_response(self, cursor_id: Optional[int] = None, codec_options: CodecOptions = _UNICODE_REPLACE_CODEC_OPTIONS, user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False) -> list[dict[str, Any]]: self.raw_response(cursor_id) if legacy_response: return bson.decode_all(self.documents, codec_options) return bson._decode_all_selective(self.documents, codec_options, user_fields)", - "docstring": "Unpack a response from the database and decode the BSON document(s). Check the response for errors and unpack, returning a dictionary containing the response data. Can raise CursorNotFound, NotPrimaryError, ExecutionTimeout, or OperationFailure. :param cursor_id: cursor_id we sent to get this response - used for raising an informative exception when we get cursor id not valid at server response :param codec_options: an instance of :class: :param user_fields: Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective.", - "type": "method", - "file_path": "mongo\\pymongo\\message.py", - "ast_data": "FunctionDef name:unpack_response arguments arg:self arg:cursor_id type:Optional[int] arg:codec_options type:CodecOptions arg:user_fields type:Optional[Mapping[str, Any]] arg:legacy_response type:bool If Return return:yes Return return:yes" - }, - { - "library": "seaborn", - "name": "categorical_order", - "source_code": "def categorical_order(vector, order = None): if order is None: if hasattr(vector, 'categories'): order = vector.categories else: try: order = vector.cat.categories except (TypeError, AttributeError): order = pd.Series(vector).unique() if variable_type(vector) = = 'numeric': order = np.sort(order) order = filter(pd.notnull, order) return list(order)", - "docstring": "Return a list of unique data values. Determine an ordered list of levels in `` object. 
Returns ------- order : list Ordered list of category levels not including null values.", - "type": "function", - "file_path": "seaborn\\seaborn\\_base.py", - "ast_data": "FunctionDef name:categorical_order arguments arg:vector arg:order If Compare op:Is If Call call:hasattr Assign Try Assign ExceptHandler Assign Call call:unique If Compare op:Eq Assign Call call:sort Assign Call call:filter Return return:yes" - }, - { - "library": "django", - "name": "parse_header_parameters", - "source_code": "def parse_header_parameters(line, max_length = MAX_HEADER_LENGTH): if max_length is not None and line and (len(line) > max_length): raise ValueError('Unable to parse header parameters (value too long).') m = Message() m['content-type'] = line params = m.get_params() pdict = {} key = params.pop(0)[0].lower() for name, value in params: if not name: continue if isinstance(value, tuple): value = collapse_rfc2231_value(value) pdict[name] = value return (key, pdict)", - "docstring": "Parse a Content-type like header. Return the main content-type and a dictionary of options. If is longer than , is raised.", - "type": "function", - "file_path": "django\\django\\utils\\http.py", - "ast_data": "FunctionDef name:parse_header_parameters arguments arg:line arg:max_length If BoolOp Compare op:IsNot Compare op:Gt Raise raises:ValueError('Unable to parse header parameters (value too long).') Assign Call call:Message Assign Assign Call call:get_params Assign Assign Call call:lower For If If Call call:isinstance Assign Call call:collapse_rfc2231_value Assign Return return:yes" - }, - { - "library": "django", - "name": "get_warning_for_invalid_pattern", - "source_code": "def get_warning_for_invalid_pattern(pattern): if isinstance(pattern, str): hint = \"Try removing the string '{}'. The list of urlpatterns should not have a prefix string as the first element.\".format(pattern) elif isinstance(pattern, tuple): hint = 'Try using path() instead of a tuple.' else: hint = None return [Error('Your URL pattern {!r} is invalid. Ensure that urlpatterns is a list of path() and/or re_path() instances.'.format(pattern), hint = hint, id = 'urls.E004')]", - "docstring": "Return a list containing a warning that the pattern is invalid. describe_pattern() cannot be used here, because we cannot rely on the urlpattern having regex or name attributes.", - "type": "function", - "file_path": "django\\django\\core\\checks\\urls.py", - "ast_data": "FunctionDef name:get_warning_for_invalid_pattern arguments arg:pattern If Call call:isinstance Assign Call call:format If Call call:isinstance Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "load_state_dict", - "source_code": "@override def load_state_dict(self, state_dict: dict[str, Any]) -> None: lr_lambdas = state_dict.pop('lr_lambdas') self.__dict__.update(state_dict) state_dict['lr_lambdas'] = lr_lambdas for idx, fn in enumerate(lr_lambdas): if fn is not None: self.lr_lambdas[idx].__dict__.update(fn)", - "docstring": "Load the scheduler's state. Args: state_dict (dict): scheduler state. 
Should be an object returned from a call to :meth:.", - "type": "method", - "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", - "ast_data": "FunctionDef name:load_state_dict arguments arg:self arg:state_dict type:dict[str, Any] Assign Call call:pop Assign For Call call:enumerate If Compare op:IsNot" - }, - { - "library": "tensorflow", - "name": "sparse_split", - "source_code": "@tf_export(v1 = ['sparse.split', 'sparse_split']) @deprecation.deprecated_endpoints('sparse_split') @deprecation.deprecated_args(None, 'split_dim is deprecated, use axis instead', 'split_dim') def sparse_split(keyword_required = KeywordRequired(), sp_input = None, num_split = None, axis = None, name = None, split_dim = None): if not isinstance(keyword_required, KeywordRequired): raise ValueError('Keyword arguments are required for this function.') if sp_input is None: raise ValueError('sp_input is required') if num_split is None: raise ValueError('num_split is required') if axis is None: raise ValueError('axis is required') axis = deprecation.deprecated_argument_lookup('axis', axis, 'split_dim', split_dim) sp_input = _convert_to_sparse_tensor(sp_input) output_inds, output_vals, output_shapes = gen_sparse_ops.sparse_split(axis, sp_input.indices, sp_input.values, sp_input.dense_shape, num_split, name = name) sparse_tensors = [] for i in range(0, num_split): sparse_tensors.append(sparse_tensor.SparseTensor(output_inds[i], output_vals[i], output_shapes[i])) return sparse_tensors", - "docstring": "Split a into tensors along . If the is not an integer multiple of each slice starting from 0: gets extra one dimension. For example, if and and the input is: input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: output_tensor[0] = [ a ] [b c ] output_tensor[1] = [ d e ] [ ] Args: keyword_required: Python 2 standin for * (temporary for argument reorder) sp_input: The to split. num_split: A Python integer. The number of ways to split. axis: A 0-D . The dimension along which to split. Must be in range [-rank, rank), where rank is the number of dimensions in the input . name: A name for the operation (optional). split_dim: Deprecated old name for axis. Returns: objects resulting from splitting . Raises: TypeError: If is not a . ValueError: If the deprecated and are both non None.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", - "ast_data": "FunctionDef name:sparse_split arguments arg:keyword_required arg:sp_input arg:num_split arg:axis arg:name arg:split_dim Call call:tf_export Call call:deprecated_endpoints Call call:deprecated_args If Raise raises:ValueError('Keyword arguments are required for this function.') If Compare op:Is Raise raises:ValueError('sp_input is required') If Compare op:Is Raise raises:ValueError('num_split is required') If Compare op:Is Raise raises:ValueError('axis is required') Assign Call call:deprecated_argument_lookup Assign Call call:_convert_to_sparse_tensor Assign Call call:sparse_split Assign For Call call:range Return return:yes" - }, - { - "library": "matplotlib", - "name": "clf", - "source_code": "def clf(self, keep_observers = False): return self.clear(keep_observers = keep_observers)", - "docstring": "[*Discouraged*] Alias for the method. .. admonition:: Discouraged The use of `` instead. 
Parameters ---------- keep_observers : bool, default: False Set *keep_observers* to True if, for example, a gui widget is tracking the Axes in the figure.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:clf arguments arg:self arg:keep_observers Return return:yes" - }, - { - "library": "pandas", - "name": "create_iter_data_given_by", - "source_code": "def create_iter_data_given_by(data: DataFrame, kind: str = 'hist') -> dict[Hashable, DataFrame | Series]: if kind = = 'hist': level = 0 else: level = 1 assert isinstance(data.columns, MultiIndex) return {col: data.loc[:, data.columns.get_level_values(level) = = col] for col in data.columns.levels[level]}", - "docstring": "Create data for iteration given is assigned or not, and it is only used in both hist and boxplot. If is assigned, return a dictionary of DataFrames in which the key of dictionary is the values in groups. If is not assigned, return input as is, and this preserves current status of iter_data. Parameters ---------- data : reformatted grouped data from method. kind : str, plot kind. This function is only used for and plots. Returns ------- iter_data : DataFrame or Dictionary of DataFrames Examples -------- If is assigned: >>> import numpy as np >>> tuples = [(\"h1\", \"a\"), (\"h1\", \"b\"), (\"h2\", \"a\"), (\"h2\", \"b\")] >>> mi = pd.MultiIndex.from_tuples(tuples) >>> value = [[1, 3, np.nan, np.nan], [3, 4, np.nan, np.nan], [np.nan, np.nan, 5, 6]] >>> data = pd.DataFrame(value, columns=mi) >>> create_iter_data_given_by(data) {'h1': h1 a b 0 1.0 3.0 1 3.0 4.0 2 NaN NaN, 'h2': h2 a b 0 NaN NaN 1 NaN NaN 2 5.0 6.0}", - "type": "function", - "file_path": "pandas\\pandas\\plotting\\_matplotlib\\groupby.py", - "ast_data": "FunctionDef name:create_iter_data_given_by arguments arg:data type:DataFrame arg:kind type:str If Compare op:Eq Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "histogram", - "source_code": "def histogram(x: Tensor, bins: Tensor, bandwidth: Tensor, epsilon: float = 1e-10) -> Tensor: pdf, _ = marginal_pdf(x.unsqueeze(2), bins, bandwidth, epsilon) return pdf", - "docstring": "Estimate the histogram of the input tensor. The calculation uses kernel density estimation which requires a bandwidth (smoothing) parameter. Args: x: Input tensor to compute the histogram with shape :math:. bins: The number of bins to use the histogram :math:. bandwidth: Gaussian smoothing factor with shape shape [1]. epsilon: A scalar, for numerical stability. Returns: Computed histogram of shape :math:. 
Examples: >>> x = torch.rand(1, 10) >>> bins = torch.torch.linspace(0, 255, 128) >>> hist = histogram(x, bins, bandwidth=torch.tensor(0.9)) >>> hist.shape torch.Size([1, 128])", - "type": "function", - "file_path": "kornia\\kornia\\enhance\\histogram.py", - "ast_data": "FunctionDef name:histogram arguments arg:x type:Tensor arg:bins type:Tensor arg:bandwidth type:Tensor arg:epsilon type:float Assign Call call:marginal_pdf Return return:yes" - }, - { - "library": "pytorch", - "name": "prepare_datasets", - "source_code": "def prepare_datasets(self, df, other_datasets, cat_feature2cats, ranking = False): test_size, val_size = self.get_test_and_val_size() df_train_val, df_test = train_test_split(df, test_size = test_size, random_state = 42) train_val_size = 1 - test_size df_train, df_val = train_test_split(df_train_val, test_size = val_size / train_val_size, random_state = 42) datasets = {'train': df_train, 'val': df_val, 'test': df_test} self.add_real_datasets(datasets, other_datasets, cat_feature2cats, ranking) return datasets", - "docstring": "Splits the dataframe into train, val, and test sets. Also adds other datasets, specified by the user, to the train set.", - "type": "method", - "file_path": "pytorch\\torchgen\\_autoheuristic\\train_decision.py", - "ast_data": "FunctionDef name:prepare_datasets arguments arg:self arg:df arg:other_datasets arg:cat_feature2cats arg:ranking Assign Call call:get_test_and_val_size Assign Call call:train_test_split Assign Assign Call call:train_test_split Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "Definition", - "source_code": "class Definition(object): def __init__(self): self.param_of = None self.directives = {} def __repr__(self): return '%s[%d]' % (self.__class__.__name__, id(self))", - "docstring": "Definition objects describe a unique definition of a variable. Subclasses of this may be used by passing an appropriate factory function to resolve. Attributes: param_of: Optional[ast.AST] directives: Dict, optional definition annotations", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\static_analysis\\reaching_definitions.py", - "ast_data": "ClassDef name:Definition FunctionDef name:__init__ arguments arg:self Assign Assign FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "previous_description", - "source_code": "@property def previous_description(self) -> ServerDescription: return self.__previous_description", - "docstring": "The previous :class:.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:previous_description arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "rgb2short", - "source_code": "def rgb2short(rgb: str) -> Tuple[str, str]: rgb = _strip_hash(rgb) incs = (0, 95, 135, 175, 215, 255) parts = [int(h, 16) for h in re.split('(..)(..)(..)', rgb)[1: 4]] res = [] for part in parts: i = 0 while i < len(incs) - 1: s, b = (incs[i], incs[i + 1]) if s < = part < = b: s1 = abs(s - part) b1 = abs(b - part) if s1 < b1: closest = s else: closest = b res.append(closest) break i + = 1 _res = ''.join([f'{i: 02x}' for i in res]) equiv = RGB2SHORT_DICT[_res] return (equiv, _res)", - "docstring": "Find the closest xterm-256 approximation to the given RGB value. Args: rgb: Hex code representing an RGB value, eg, 'abcdef'. Returns: String between 0 and 255, compatible with xterm. 
Example: >>> rgb2short('123456') ('23', '005f5f') >>> rgb2short('ffffff') ('231', 'ffffff') >>> rgb2short('0DADD6') # vimeo logo ('38', '00afd7')", - "type": "function", - "file_path": "kornia\\kornia\\utils\\image_print.py", - "ast_data": "FunctionDef name:rgb2short arguments arg:rgb type:str Assign Call call:_strip_hash Assign Assign Assign For Assign While Compare op:Lt Assign If Compare op:LtE op:LtE Assign Call call:abs Assign Call call:abs If Compare op:Lt Assign Assign Assign Call call:join Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "batch_isend_irecv", - "source_code": "def batch_isend_irecv(p2p_op_list: list[P2POp]) -> list[Work]: _check_p2p_op_list(p2p_op_list) group = p2p_op_list[0].group if group is None: group = _get_default_group() device = p2p_op_list[0].tensor.device def peer_kwarg(op: P2POp) -> dict[str, int]: key = 'group_dst' if op.op = = isend else 'group_src' return {key: op.group_peer} if type(group) = = ProcessGroup and group._get_backend(device).supports_coalescing: with _coalescing_manager(group, device, async_ops = True) as cm: for p2p_op in p2p_op_list: p2p_op.op(p2p_op.tensor, group = p2p_op.group, tag = p2p_op.tag, **peer_kwarg(p2p_op)) return cm.works else: reqs = [] for p2p_op in p2p_op_list: work = p2p_op.op(p2p_op.tensor, group = p2p_op.group, tag = p2p_op.tag, **peer_kwarg(p2p_op)) if work: reqs.append(work) return reqs", - "docstring": "Send or Receive a batch of tensors asynchronously and return a list of requests. Process each of the operations in `torch.cuda.set_device` are allowed.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:batch_isend_irecv arguments arg:p2p_op_list type:list[P2POp] Assign If Compare op:Is Assign Call call:_get_default_group Assign FunctionDef name:peer_kwarg arguments arg:op type:P2POp Assign Return return:yes If BoolOp Compare op:Eq With For Return return:yes Assign For Assign Call call:op If Return return:yes" - }, - { - "library": "pandas", - "name": "insert", - "source_code": "def insert(self, loc: int, item: Interval) -> Self: left_insert, right_insert = self._validate_scalar(item) new_left = self.left.insert(loc, left_insert) new_right = self.right.insert(loc, right_insert) return self._shallow_copy(new_left, new_right)", - "docstring": "Return a new IntervalArray inserting new item at location. Follows Python numpy.insert semantics for negative values. Only Interval objects and NA can be inserted into an IntervalIndex Parameters ---------- loc : int item : Interval Returns ------- IntervalArray", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\interval.py", - "ast_data": "FunctionDef name:insert arguments arg:self arg:loc type:int arg:item type:Interval Assign Call call:_validate_scalar Assign Call call:insert Assign Call call:insert Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_label", - "source_code": "def set_label(self, label, *, loc = None, **kwargs): if self.orientation = = 'vertical': self.ax.set_ylabel(label, loc = loc, **kwargs) else: self.ax.set_xlabel(label, loc = loc, **kwargs) self.stale = True", - "docstring": "Add a label to the long axis of the colorbar. Parameters ---------- label : str The label text. loc : str, optional The location of the label. - For horizontal orientation one of {'left', 'center', 'right'} - For vertical orientation one of {'bottom', 'center', 'top'} Defaults to :rc: or :rc: depending on the orientation. 
**kwargs Keyword arguments are passed to / . Supported keywords are *labelpad* and properties.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colorbar.py", - "ast_data": "FunctionDef name:set_label arguments arg:self arg:label kwarg:kwargs If Compare op:Eq Assign" - }, - { - "library": "django", - "name": "get_sortable_by", - "source_code": "def get_sortable_by(self, request): return self.sortable_by if self.sortable_by is not None else self.get_list_display(request)", - "docstring": "Hook for specifying which fields can be sorted in the changelist.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\options.py", - "ast_data": "FunctionDef name:get_sortable_by arguments arg:self arg:request Return return:yes" - }, - { - "library": "pytorch", - "name": "disable_observer", - "source_code": "def disable_observer(mod): if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod): mod.disable_observer()", - "docstring": "Disable observation for this module. Disable observation for this module, if applicable. Example usage:: # model is any PyTorch model model.apply(torch.ao.quantization.disable_observer)", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fake_quantize.py", - "ast_data": "FunctionDef name:disable_observer arguments arg:mod If BoolOp Call call:isinstance Call call:_is_fake_quant_script_module" - }, - { - "library": "tensorflow", - "name": "binary_elementwise_apis", - "source_code": "def binary_elementwise_apis(): return tuple(_BINARY_ELEMENTWISE_APIS)", - "docstring": "Returns a list of APIs that have been registered as binary elementwise.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", - "ast_data": "FunctionDef name:binary_elementwise_apis arguments Return return:yes" - }, - { - "library": "django", - "name": "get_srid", - "source_code": "def get_srid(self, obj): srid = obj.srid if srid is None or self.srid = = -1 or (srid = = -1 and self.srid ! = -1): return self.srid else: return srid", - "docstring": "Return the default SRID for the given geometry or raster, taking into account the SRID set for the field. For example, if the input geometry or raster doesn't have an SRID, then the SRID of the field will be returned.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\db\\models\\fields.py", - "ast_data": "FunctionDef name:get_srid arguments arg:self arg:obj Assign If BoolOp Compare op:Is Compare op:Eq BoolOp Compare op:Eq Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "keras_tensor_to_placeholder", - "source_code": "def keras_tensor_to_placeholder(x): if isinstance(x, KerasTensor): return x._to_placeholder() else: return x", - "docstring": "Construct a graph placeholder to represent a KerasTensor when tracing.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\keras_tensor.py", - "ast_data": "FunctionDef name:keras_tensor_to_placeholder arguments arg:x If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "undistort", - "source_code": "def undistort(self, params: Tensor, points: Vector2) -> Vector2: fx, fy, cx, cy = (params[..., 0], params[..., 1], params[..., 2], params[..., 3]) x = (points.x - cx) / fx y = (points.y - cy) / fy return Vector2.from_coords(x, y)", - "docstring": "Undistort one or more Vector2 points using the affine transform. Args: params: Tensor representing the affine transform parameters. 
points: Vector2 representing the points to undistort. Returns: Vector2 representing the undistorted points. Example: >>> params = Tensor([1., 2., 3., 4.]) >>> points = Vector2.from_coords(1., 2.) >>> AffineTransform().undistort(params, points) x: -2.0 y: -1.0", - "type": "method", - "file_path": "kornia\\kornia\\sensors\\camera\\distortion_model.py", - "ast_data": "FunctionDef name:undistort arguments arg:self arg:params type:Tensor arg:points type:Vector2 Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "save_binary", - "source_code": "def save_binary(self, package, resource, binary: bytes): filename = self._filename(package, resource) self._write(filename, binary)", - "docstring": "Save raw bytes to the package. Args: package (str): The name of module package this resource should go it (e.g. ``). resource (str): A unique name for the resource, used to identify it to load. binary (str): The data to save.", - "type": "method", - "file_path": "pytorch\\torch\\package\\package_exporter.py", - "ast_data": "FunctionDef name:save_binary arguments arg:self arg:package arg:resource arg:binary type:bytes Assign Call call:_filename" - }, - { - "library": "tensorflow", - "name": "write_raw_pb", - "source_code": "@tf_export('summary.experimental.write_raw_pb', v1 = []) def write_raw_pb(tensor, step = None, name = None): with ops.name_scope(name, 'write_raw_pb') as scope: if _summary_state.writer is None: return constant_op.constant(False) if step is None: step = get_step() if step is None: raise ValueError('No step set. Please specify one either through the `step` argument or through tf.summary.experimental.set_step()') def record(): with ops.device('cpu: 0'): raw_summary_op = gen_summary_ops.write_raw_proto_summary(_summary_state.writer._resource, step, array_ops.identity(tensor), name = scope) with ops.control_dependencies([raw_summary_op]): return constant_op.constant(True) with ops.device('cpu: 0'): op = smart_cond.smart_cond(should_record_summaries(), record, _nothing, name = 'summary_cond') if not context.executing_eagerly(): ops.add_to_collection(ops.GraphKeys._SUMMARY_COLLECTION, op) return op", - "docstring": "Writes a summary using raw protocol buffers. Experimental: this exists to support the usage of V1-style manual summary writing (via the construction of a protocol buffer) with the V2 summary writing API. Args: tensor: the string Tensor holding one or more serialized protobufs step: Explicit -castable monotonic step value for this summary. If omitted, this defaults to , which must not be None. name: Optional string name for this op. Returns: True on success, or false if no summary was written because no default summary writer was available. Raises: ValueError: if a default writer exists, but no step was provided and is None.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\summary_ops_v2.py", - "ast_data": "FunctionDef name:write_raw_pb arguments arg:tensor arg:step arg:name Call call:tf_export With If Compare op:Is Return return:yes If Compare op:Is Assign Call call:get_step If Compare op:Is Raise raises:ValueError('No step set. 
Please specify one either through the `step` argument or through tf.summary.experimental.set_step()') FunctionDef name:record arguments With Assign Call call:write_raw_proto_summary With Return return:yes With Assign Call call:smart_cond If Return return:yes" - }, - { - "library": "tensorflow", - "name": "train_on_batch", - "source_code": "def train_on_batch(model, inputs, targets, sample_weights = None, output_loss_metrics = None): inputs = training_utils_v1.cast_to_model_input_dtypes(inputs, model) outs, total_loss, output_losses, masks = _process_single_batch(model, inputs, targets, sample_weights = sample_weights, training = True, output_loss_metrics = output_loss_metrics) if not isinstance(outs, list): outs = [outs] metrics_results = _eager_metrics_fn(model, outs, targets, sample_weights = sample_weights, masks = masks) total_loss = nest.flatten(total_loss) return {'total_loss': total_loss, 'output_losses': output_losses, 'metrics': metrics_results}", - "docstring": "Calculates the loss and gradient updates for one input batch. Args: model: Model whose loss has to be calculated. inputs: Input batch data. targets: Target batch data. sample_weights: Sample weight batch data. output_loss_metrics: List of metrics that are used to aggregated output loss values. Returns: Dict with three items: 'total_loss': list with a single tensor for overall loss, 'output_losses': list of tensors for loss corresponding to each of the model output. Could be a empty list when model has only one output. 'metrics': list of tensors for metric specified.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_eager_v1.py", - "ast_data": "FunctionDef name:train_on_batch arguments arg:model arg:inputs arg:targets arg:sample_weights arg:output_loss_metrics Assign Call call:cast_to_model_input_dtypes Assign Call call:_process_single_batch If Assign Assign Call call:_eager_metrics_fn Assign Call call:flatten Return return:yes" - }, - { - "library": "authlib", - "name": "scope_to_list", - "source_code": "def scope_to_list(scope): if isinstance(scope, (tuple, list, set)): return [to_unicode(s) for s in scope] elif scope is None: return None return scope.strip().split()", - "docstring": "Convert a space separated string to a list of scopes.", - "type": "function", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\util.py", - "ast_data": "FunctionDef name:scope_to_list arguments arg:scope If Call call:isinstance Return return:yes If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "SubprocessHandler", - "source_code": "class SubprocessHandler: def __init__(self, entrypoint: str, args: tuple, env: dict[str, str], stdout: Optional[str], stderr: Optional[str], local_rank_id: int): self._stdout = open(stdout, 'w') if stdout else None self._stderr = open(stderr, 'w') if stderr else None env_vars = os.environ.copy() env_vars.update(env) args_str = (entrypoint, *[str(e) for e in args]) self.local_rank_id = local_rank_id self.proc: subprocess.Popen = self._popen(args_str, env_vars) def _popen(self, args: tuple, env: dict[str, str]) -> subprocess.Popen: kwargs: dict[str, Any] = {} if not IS_WINDOWS: kwargs['start_new_session'] = True return subprocess.Popen(args = args, env = env, stdout = self._stdout, stderr = self._stderr, **kwargs) def close(self, death_sig: Optional[signal.Signals] = None) -> None: if not death_sig: death_sig = _get_default_signal() if IS_WINDOWS: self.proc.send_signal(death_sig) else: os.killpg(self.proc.pid, death_sig) 
if self._stdout: self._stdout.close() if self._stderr: self._stderr.close()", - "docstring": "Convenience wrapper around python's ``. Keeps track of meta-objects associated to the process (e.g. stdout and stderr redirect fds).", - "type": "class", - "file_path": "pytorch\\torch\\distributed\\elastic\\multiprocessing\\subprocess_handler\\subprocess_handler.py", - "ast_data": "ClassDef name:SubprocessHandler FunctionDef name:__init__ arguments arg:self arg:entrypoint type:str arg:args type:tuple arg:env type:dict[str, str] arg:stdout type:Optional[str] arg:stderr type:Optional[str] arg:local_rank_id type:int Assign Assign Assign Call call:copy Assign Assign FunctionDef name:_popen arguments arg:self arg:args type:tuple arg:env type:dict[str, str] If Assign Return return:yes FunctionDef name:close arguments arg:self arg:death_sig type:Optional[signal.Signals] If Assign Call call:_get_default_signal If If If" - }, - { - "library": "pandas", - "name": "select_describe_func", - "source_code": "def select_describe_func(data: Series) -> Callable: if is_bool_dtype(data.dtype): return describe_categorical_1d elif is_numeric_dtype(data): return describe_numeric_1d elif data.dtype.kind = = 'M' or isinstance(data.dtype, DatetimeTZDtype): return describe_timestamp_1d elif data.dtype.kind = = 'm': return describe_numeric_1d else: return describe_categorical_1d", - "docstring": "Select proper function for describing series based on data type. Parameters ---------- data : Series Series to be described.", - "type": "function", - "file_path": "pandas\\pandas\\core\\methods\\describe.py", - "ast_data": "FunctionDef name:select_describe_func arguments arg:data type:Series If Call call:is_bool_dtype Return return:yes If Call call:is_numeric_dtype Return return:yes If BoolOp Compare op:Eq Call call:isinstance Return return:yes If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "localize_input", - "source_code": "def localize_input(value, default = None): if isinstance(value, str): return value elif isinstance(value, bool): return str(value) elif isinstance(value, (decimal.Decimal, float, int)): return number_format(value) elif isinstance(value, datetime.datetime): format = default or get_format('DATETIME_INPUT_FORMATS')[0] format = sanitize_strftime_format(format) return value.strftime(format) elif isinstance(value, datetime.date): format = default or get_format('DATE_INPUT_FORMATS')[0] format = sanitize_strftime_format(format) return value.strftime(format) elif isinstance(value, datetime.time): format = default or get_format('TIME_INPUT_FORMATS')[0] return value.strftime(format) return value", - "docstring": "Check if an input value is a localizable type and return it formatted with the appropriate formatting string of the current locale.", - "type": "function", - "file_path": "django\\django\\utils\\formats.py", - "ast_data": "FunctionDef name:localize_input arguments arg:value arg:default If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Assign BoolOp Assign Call call:sanitize_strftime_format Return return:yes If Call call:isinstance Assign BoolOp Assign Call call:sanitize_strftime_format Return return:yes If Call call:isinstance Assign BoolOp Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, X, y = None): self._fit(X, y) return self", - 
"docstring": "Fit the transformer on . Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Input data, where is the number of samples and is the number of features. y : Ignored Not used, present for API consistency by convention. Returns ------- self : object Fitted estimator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\impute\\_base.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Call call:_fit_context Return return:yes" - }, - { - "library": "tensorflow", - "name": "add_type_based_api_dispatcher", - "source_code": "def add_type_based_api_dispatcher(target): if hasattr(target, TYPE_BASED_DISPATCH_ATTR): raise ValueError(f'{target} already has a type-based API dispatcher.') _, unwrapped = tf_decorator.unwrap(target) target_argspec = tf_inspect.getargspec(unwrapped) if target_argspec.varargs or target_argspec.keywords: return target setattr(target, TYPE_BASED_DISPATCH_ATTR, _api_dispatcher.PythonAPIDispatcher(unwrapped.__name__, target_argspec.args, target_argspec.defaults)) _TYPE_BASED_DISPATCH_SIGNATURES[target] = collections.defaultdict(list) return target", - "docstring": "Adds a PythonAPIDispatcher to the given TensorFlow API function.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", - "ast_data": "FunctionDef name:add_type_based_api_dispatcher arguments arg:target If Call call:hasattr Raise raises:ValueError(f'{target} already has a type-based API dispatcher.') Assign Call call:unwrap Assign Call call:getargspec If BoolOp Return return:yes Assign Call call:defaultdict Return return:yes" - }, - { - "library": "tensorflow", - "name": "on_run_start", - "source_code": "def on_run_start(self, request): self._is_run_start = True self._update_run_calls_state(request.run_call_count, request.fetches, request.feed_dict, is_callable_runner = request.is_callable_runner) if self._active_tensor_filter: return self._active_tensor_filter_run_start_response self._exit_if_requested_by_user() if self._run_call_count > 1 and (not self._skip_debug): if self._run_through_times > 0: return framework.OnRunStartResponse(framework.OnRunStartAction.NON_DEBUG_RUN, []) elif self._run_through_times = = 0: return self._run_start_response or framework.OnRunStartResponse(framework.OnRunStartAction.DEBUG_RUN, self._get_run_debug_urls()) if self._run_start_response is None: self._prep_cli_for_run_start() self._run_start_response = self._launch_cli() if self._active_tensor_filter: self._active_tensor_filter_run_start_response = self._run_start_response if self._run_through_times > 1: self._run_through_times - = 1 self._exit_if_requested_by_user() return self._run_start_response", - "docstring": "Overrides on-run-start callback. Args: request: An instance of . Returns: An instance of .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\local_cli_wrapper.py", - "ast_data": "FunctionDef name:on_run_start arguments arg:self arg:request Assign If Return return:yes If BoolOp Compare op:Gt If Compare op:Gt Return return:yes If Compare op:Eq Return return:yes If Compare op:Is Assign Call call:_launch_cli If Assign If Compare op:Gt Return return:yes" - }, - { - "library": "tensorflow", - "name": "apply_to_operation", - "source_code": "def apply_to_operation(self, operation): attr_value = attr_value_pb2.AttrValue(s = self._proto.SerializeToString()) operation._set_attr('_XlaSharding', attr_value)", - "docstring": "Applies this Sharding attribute to . 
Args: operation: A tf.Operation to add sharding annotation.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", - "ast_data": "FunctionDef name:apply_to_operation arguments arg:self arg:operation Assign Call call:AttrValue" - }, - { - "library": "scipy", - "name": "LinearMixing", - "source_code": "class LinearMixing(GenericBroyden): def __init__(self, alpha = None): GenericBroyden.__init__(self) self.alpha = alpha def solve(self, f, tol = 0): return -f * self.alpha def matvec(self, f): return -f / self.alpha def rsolve(self, f, tol = 0): return -f * np.conj(self.alpha) def rmatvec(self, f): return -f / np.conj(self.alpha) def todense(self): return np.diag(np.full(self.shape[0], -1 / self.alpha)) def _update(self, x, f, dx, df, dx_norm, df_norm): pass", - "docstring": "Find a root of a function, using a scalar Jacobian approximation. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. Parameters ---------- %(params_basic)s alpha : float, optional The Jacobian approximation is (-1/alpha). %(params_extra)s See Also -------- root : Interface to root finding algorithms for multivariate functions. See `` in particular.", - "type": "class", - "file_path": "scipy\\scipy\\optimize\\_nonlin.py", - "ast_data": "ClassDef name:LinearMixing FunctionDef name:__init__ arguments arg:self arg:alpha Assign FunctionDef name:solve arguments arg:self arg:f arg:tol Return return:yes FunctionDef name:matvec arguments arg:self arg:f Return return:yes FunctionDef name:rsolve arguments arg:self arg:f arg:tol Return return:yes FunctionDef name:rmatvec arguments arg:self arg:f Return return:yes FunctionDef name:todense arguments arg:self Return return:yes FunctionDef name:_update arguments arg:self arg:x arg:f arg:dx arg:df arg:dx_norm arg:df_norm" - }, - { - "library": "authlib", - "name": "validate_service_documentation", - "source_code": "def validate_service_documentation(self): value = self.get('service_documentation') if value and (not is_valid_url(value)): raise ValueError('\"service_documentation\" MUST be a URL')", - "docstring": "OPTIONAL. URL of a page containing human-readable information that developers might want or need to know when using the authorization server. In particular, if the authorization server does not support Dynamic Client Registration, then information on how to register clients needs to be provided in this documentation.", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", - "ast_data": "FunctionDef name:validate_service_documentation arguments arg:self Assign Call call:get If BoolOp Raise raises:ValueError('\"service_documentation\" MUST be a URL')" - }, - { - "library": "kornia", - "name": "unproject", - "source_code": "def unproject(self, point_2d: Tensor, depth: Tensor) -> Tensor: P = self.intrinsics @ self.extrinsics P_inv = _torch_inverse_cast(P) return transform_points(P_inv, convert_points_to_homogeneous(point_2d) * depth)", - "docstring": "Unproject a 2d point in 3d. Transform coordinates in the pixel frame to the world frame. Args: point_2d: tensor containing the 2d to be projected to world coordinates. The shape of the tensor can be :math:. depth: tensor containing the depth value of each 2d points. The tensor shape must be equal to point2d :math:. normalize: whether to normalize the pointcloud. This must be set to when the depth is represented as the Euclidean ray length from the camera position. 
Returns: tensor of (x, y, z) world coordinates with shape :math:. Example: >>> _ = torch.manual_seed(0) >>> x = torch.rand(1, 2) >>> depth = torch.ones(1, 1) >>> K = torch.eye(4)[None] >>> E = torch.eye(4)[None] >>> h = torch.ones(1) >>> w = torch.ones(1) >>> pinhole = kornia.geometry.camera.PinholeCamera(K, E, h, w) >>> pinhole.unproject(x, depth) tensor([[0.4963, 0.7682, 1.0000]])", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", - "ast_data": "FunctionDef name:unproject arguments arg:self arg:point_2d type:Tensor arg:depth type:Tensor Assign Assign Call call:_torch_inverse_cast Return return:yes" - }, - { - "library": "pandas", - "name": "construct_array_type", - "source_code": "@classmethod def construct_array_type(cls) -> type_t[Categorical]: from pandas import Categorical return Categorical", - "docstring": "Return the array type associated with this dtype. Returns ------- type", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", - "ast_data": "FunctionDef name:construct_array_type arguments arg:cls Return return:yes" - }, - { - "library": "coconut", - "name": "handle_and_manage", - "source_code": "def handle_and_manage(item, handler, manager, **kwargs): return manage(attach(item, handler), manager, **kwargs)", - "docstring": "Attach a handler and a manager to the given parse item.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:handle_and_manage arguments arg:item arg:handler arg:manager kwarg:kwargs Return return:yes" - }, - { - "library": "django", - "name": "get_version", - "source_code": "def get_version(version = None): version = get_complete_version(version) main = get_main_version(version) sub = '' if version[3] = = 'alpha' and version[4] = = 0: git_changeset = get_git_changeset() if git_changeset: sub = '.dev%s' % git_changeset elif version[3] ! = 'final': mapping = {'alpha': 'a', 'beta': 'b', 'rc': 'rc'} sub = mapping[version[3]] + str(version[4]) return main + sub", - "docstring": "Return a PEP 440-compliant version number from VERSION.", - "type": "function", - "file_path": "django\\django\\utils\\version.py", - "ast_data": "FunctionDef name:get_version arguments arg:version Assign Call call:get_complete_version Assign Call call:get_main_version Assign If BoolOp Compare op:Eq Compare op:Eq Assign Call call:get_git_changeset If Assign If Compare op:NotEq Assign Assign Return return:yes" - }, - { - "library": "flexx", - "name": "remote", - "source_code": "@property def remote(self): return self._remote", - "docstring": "Whether the asset is remote (client will load it from elsewhere). If True, the source specifies the URL.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_asset.py", - "ast_data": "FunctionDef name:remote arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "poles", - "source_code": "def poles(self): if self._poles is None: m = self.weights.size B = np.eye(m + 1, dtype = self.weights.dtype) B[0, 0] = 0 E = np.zeros_like(B, dtype = np.result_type(self.weights, self._support_points)) E[0, 1:] = self.weights E[1:, 0] = 1 np.fill_diagonal(E[1:, 1:], self._support_points) pol = scipy.linalg.eigvals(E, B) self._poles = pol[np.isfinite(pol)] return self._poles", - "docstring": "Compute the poles of the rational approximation. 
Returns ------- poles : array Poles of the AAA approximation, repeated according to their multiplicity but not in any specific order.", - "type": "method", - "file_path": "scipy\\scipy\\interpolate\\_bary_rational.py", - "ast_data": "FunctionDef name:poles arguments arg:self If Compare op:Is Assign Assign Call call:eye Assign Assign Call call:zeros_like Assign Assign Assign Call call:eigvals Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "lazy_bind", - "source_code": "def lazy_bind(concrete_type, unbound_method): def lazy_binding_method(cpp_module, *args): def init_fn(script_module): orig_class = concrete_type.py_class for name in dir(orig_class): item = getattr(orig_class, name, None) if _jit_internal.is_ignored_fn(item): setattr(script_module, name, item) for name, value in concrete_type.get_constants().items(): setattr(script_module, name, value) script_module = torch.jit.RecursiveScriptModule._construct(cpp_module, init_fn) method = types.MethodType(unbound_method, script_module) return method(*args) lazy_binding_method.original_fn = unbound_method lazy_binding_method.__name__ = unbound_method.__name__ torch._jit_internal.copy_torchscript_modifier(unbound_method, lazy_binding_method) return lazy_binding_method", - "docstring": "Return a function that lazily binds to a provided Module IValue, then invokes the method. We do this so that any Python shenanigans that will poison type sharing are impossible at compile time.", - "type": "function", - "file_path": "pytorch\\torch\\jit\\_recursive.py", - "ast_data": "FunctionDef name:lazy_bind arguments arg:concrete_type arg:unbound_method FunctionDef name:lazy_binding_method arguments arg:cpp_module vararg:args FunctionDef name:init_fn arguments arg:script_module Assign For Call call:dir Assign Call call:getattr If Call call:is_ignored_fn For Call call:items Assign Call call:_construct Assign Call call:MethodType Return return:yes Assign Assign Return return:yes" - }, - { - "library": "authlib", - "name": "raise_error_response", - "source_code": "def raise_error_response(self, error): status = error.status_code body = json.dumps(dict(error.get_body())) headers = error.get_headers() raise_http_exception(status, body, headers)", - "docstring": "Raise HTTPException for OAuth2Error. Developers can re-implement this method to customize the error response. :param error: OAuth2Error :raise: HTTPException", - "type": "method", - "file_path": "authlib\\authlib\\integrations\\flask_oauth2\\resource_protector.py", - "ast_data": "FunctionDef name:raise_error_response arguments arg:self arg:error Assign Assign Call call:dumps Assign Call call:get_headers" - }, - { - "library": "scipy", - "name": "mfft", - "source_code": "@mfft.setter def mfft(self, n_: int): if not n_ > = self.m_num: raise ValueError(f'Attribute mfft = {n_} needs to be at least the ' + f'window length m_num = {self.m_num}!') self._mfft = n_", - "docstring": "Setter for the length of FFT utilized. See the property for further details.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", - "ast_data": "FunctionDef name:mfft arguments arg:self arg:n_ type:int If Raise raises:ValueError(f'Attribute mfft={n_} needs to be at least the ' + f'window length m_num={self.m_num}!') Assign" - }, - { - "library": "tensorflow", - "name": "compute_dtype", - "source_code": "@property def compute_dtype(self): return self._compute_dtype", - "docstring": "The compute dtype of this policy. This is the dtype layers will do their computations in. 
Typically layers output tensors with the compute dtype as well. Note that even if the compute dtype is float16 or bfloat16, hardware devices may not do individual adds, multiplies, and other fundamental operations in float16 or bfloat16, but instead may do some of them in float32 for numeric stability. The compute dtype is the dtype of the inputs and outputs of the TensorFlow ops that the layer executes. Internally, many TensorFlow ops will do certain internal calculations in float32 or some other device-internal intermediate format with higher precision than float16/bfloat16, to increase numeric stability. For example, a layer, when run on a GPU with a float16 compute dtype, will pass float16 inputs to . But, will do use float32 intermediate math. The performance benefit of float16 is still apparent, due to increased memory bandwidth and the fact modern GPUs have specialized hardware for computing matmuls on float16 inputs while still keeping intermediate computations in float32. Returns: The compute dtype of this policy, as a string.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\policy.py", - "ast_data": "FunctionDef name:compute_dtype arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "is_tensor_evenly_shardable", - "source_code": "def is_tensor_evenly_shardable(shape: Sequence[int], spec: DTensorSpec) -> bool: shards_map = [1] * len(shape) for i, placement in enumerate(spec.placements): if placement.is_shard(): shard_dim = cast(Shard, placement).dim shards_map[shard_dim] * = spec.mesh.size(i) for i, dim_size in enumerate(shape): if shards_map[i] > 1 and dim_size % shards_map[i] ! = 0: return False return True", - "docstring": "Check if the shape is evenly shardable according to the spec.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py", - "ast_data": "FunctionDef name:is_tensor_evenly_shardable arguments arg:shape type:Sequence[int] arg:spec type:DTensorSpec Assign For Call call:enumerate If Call call:is_shard Assign For Call call:enumerate If BoolOp Compare op:Gt Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "sphinx", - "name": "pending_xref", - "source_code": "class pending_xref(nodes.Inline, nodes.Element): child_text_separator = ''", - "docstring": "Node for cross-references that cannot be resolved without complete information about all documents. These nodes are resolved before writing output, in BuildEnvironment.resolve_references.", - "type": "class", - "file_path": "sphinx\\sphinx\\addnodes.py", - "ast_data": "ClassDef name:pending_xref Assign" - }, - { - "library": "tensorflow", - "name": "all_gather", - "source_code": "def all_gather(t, group_size, group_key, instance_key, communication_hint = 'auto', timeout = 0): if group_size < 1: raise ValueError(f'Parameter `group_size` to all_gather must be at least 1. Received: {group_size}.') return gen_collective_ops.collective_gather(t, shape = [0], group_size = group_size, group_key = group_key, instance_key = instance_key, communication_hint = communication_hint.lower(), timeout_seconds = timeout)", - "docstring": "Accumulates tensors collectively, across devices, along first dimension. Args: t: the tensor to participate in the accumulation. group_size: the total number of tensors to be collectively accumulated. Each must reside on a different device. Should be a positive integer. group_key: an integer identifying the group of devices. 
instance_key: an integer identifying the participating group of Ops. communication_hint: preferred collective communication. The implementation may fall back to another mechanism. Options include , , and . timeout: a float. If set to a non zero, set a completion timeout to detect staleness. If the timer goes off, a DeadlineExceededError is raised. The timeout value in seconds. This feature is experimental. Returns: An Op implementing the distributed operation. Raises: ValueError: if any of the input parameter constraints are not met.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\collective_ops.py", - "ast_data": "FunctionDef name:all_gather arguments arg:t arg:group_size arg:group_key arg:instance_key arg:communication_hint arg:timeout If Compare op:Lt Raise raises:ValueError(f'Parameter `group_size` to all_gather must be at least 1. Received: {group_size}.') Return return:yes" - }, - { - "library": "pytorch", - "name": "pre_export_passes", - "source_code": "@abc.abstractmethod def pre_export_passes(self, options: ResolvedExportOptions, original_model: torch.nn.Module | Callable, fx_module: torch.fx.GraphModule, fx_module_args: Sequence[Any]): ...", - "docstring": "Applies pre-export passes to the FX graph. Pre-export passes are FX-to-FX graph transformations that make the graph more palatable for the FX-to-ONNX conversion. For example, it can be used to flatten model input/output, add explicit casts to the graph, replace/decompose operators, functionalize the graph, etc.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\_exporter_legacy.py", - "ast_data": "FunctionDef name:pre_export_passes arguments arg:self arg:options type:ResolvedExportOptions arg:original_model type:torch.nn.Module | Callable arg:fx_module type:torch.fx.GraphModule arg:fx_module_args type:Sequence[Any]" - }, - { - "library": "pytorch", - "name": "needs_unshard", - "source_code": "def needs_unshard(self) -> bool: if not self.uses_sharded_strategy: return False unsharded_flat_param = self._get_padded_unsharded_flat_param() already_unsharded = _same_storage_size(unsharded_flat_param, unsharded_flat_param.numel()) return not already_unsharded", - "docstring": "Return if the handle's flat parameter needs to be unsharded.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", - "ast_data": "FunctionDef name:needs_unshard arguments arg:self If Return return:yes Assign Call call:_get_padded_unsharded_flat_param Assign Call call:_same_storage_size Return return:yes" - }, - { - "library": "kornia", - "name": "rgb_to_bgr", - "source_code": "def rgb_to_bgr(image: Tensor) -> Tensor: if not isinstance(image, Tensor): raise TypeError(f'Input type is not a Tensor. Got {type(image)}') if len(image.shape) < 3 or image.shape[-3] ! = 3: raise ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}') return bgr_to_rgb(image)", - "docstring": "Convert a RGB image to BGR. .. image:: _static/img/rgb_to_bgr.png Args: image: RGB Image to be converted to BGRof of shape :math:. Returns: BGR version of the image with shape of shape :math:. Example: >>> input = torch.rand(2, 3, 4, 5) >>> output = rgb_to_bgr(input) # 2x3x4x5", - "type": "function", - "file_path": "kornia\\kornia\\color\\rgb.py", - "ast_data": "FunctionDef name:rgb_to_bgr arguments arg:image type:Tensor If Raise raises:TypeError(f'Input type is not a Tensor. 
Got {type(image)}') If BoolOp Compare op:Lt Compare op:NotEq Raise raises:ValueError(f'Input size must have a shape of (*, 3, H, W).Got {image.shape}') Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, dataset_fn, coordinator): def disallow_variable_creation(next_creator, **kwargs): raise ValueError('Creating variables in `dataset_fn` is not allowed.') if isinstance(dataset_fn, def_function.Function): with variable_scope.variable_creator_scope(disallow_variable_creation): dataset_fn = dataset_fn.get_concrete_function() elif not isinstance(dataset_fn, tf_function.ConcreteFunction): with variable_scope.variable_creator_scope(disallow_variable_creation): dataset_fn = def_function.function(dataset_fn).get_concrete_function() self._dataset_fn = dataset_fn self._coordinator = coordinator self._element_spec = None", - "docstring": "Makes an iterable from datasets created by the given function. Args: dataset_fn: A function that returns a . coordinator: a object, used to create dataset resources.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\coordinator\\values.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:dataset_fn arg:coordinator FunctionDef name:disallow_variable_creation arguments arg:next_creator kwarg:kwargs Raise raises:ValueError('Creating variables in `dataset_fn` is not allowed.') If Call call:isinstance With Assign Call call:get_concrete_function If With Assign Call call:get_concrete_function Assign Assign Assign" - }, - { - "library": "matplotlib", - "name": "host_axes", - "source_code": "def host_axes(*args, axes_class = Axes, figure = None, **kwargs): import matplotlib.pyplot as plt host_axes_class = host_axes_class_factory(axes_class) if figure is None: figure = plt.gcf() ax = host_axes_class(figure, *args, **kwargs) figure.add_axes(ax) return ax", - "docstring": "Create axes that can act as a hosts to parasitic axes. Parameters ---------- figure : Figure to which the axes will be added. Defaults to the current figure . *args, **kwargs Will be passed on to the underlying object creation.", - "type": "function", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\parasite_axes.py", - "ast_data": "FunctionDef name:host_axes arguments vararg:args kwarg:kwargs Assign Call call:host_axes_class_factory If Compare op:Is Assign Call call:gcf Assign Call call:host_axes_class Return return:yes" - }, - { - "library": "authlib", - "name": "validate_subject_types_supported", - "source_code": "def validate_subject_types_supported(self): values = self.get('subject_types_supported') if values is None: raise ValueError('\"subject_types_supported\" is required') if not isinstance(values, list): raise ValueError('\"subject_types_supported\" MUST be JSON array') valid_types = {'pairwise', 'public'} if not valid_types.issuperset(set(values)): raise ValueError('\"subject_types_supported\" contains invalid values')", - "docstring": "REQUIRED. JSON array containing a list of the Subject Identifier types that this OP supports. 
Valid types include pairwise and public.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", - "ast_data": "FunctionDef name:validate_subject_types_supported arguments arg:self Assign Call call:get If Compare op:Is Raise raises:ValueError('\"subject_types_supported\" is required') If Raise raises:ValueError('\"subject_types_supported\" MUST be JSON array') Assign If Raise raises:ValueError('\"subject_types_supported\" contains invalid values')" - }, - { - "library": "mongo", - "name": "seek", - "source_code": "async def seek(self, pos: int, whence: int = _SEEK_SET) -> int: if whence = = _SEEK_SET: new_pos = pos elif whence = = _SEEK_CUR: new_pos = self._position + pos elif whence = = _SEEK_END: new_pos = int(self.length) + pos else: raise OSError(22, 'Invalid value for `whence`') if new_pos < 0: raise OSError(22, 'Invalid value for `pos` - must be positive') if new_pos = = self._position: return new_pos self._position = new_pos self._buffer = EMPTY self._buffer_pos = 0 if self._chunk_iter: await self._chunk_iter.close() self._chunk_iter = None return new_pos", - "docstring": "Set the current position of this file. :param pos: the position (or offset if using relative positioning) to seek to :param whence: where to seek from. :attr: (`os.SEEK_CURos.SEEK_ENDio.IOBase.seek`.", - "type": "method", - "file_path": "mongo\\gridfs\\asynchronous\\grid_file.py", - "ast_data": "AsyncFunctionDef name:seek arguments arg:self arg:pos type:int arg:whence type:int If Compare op:Eq Assign If Compare op:Eq Assign If Compare op:Eq Assign Raise raises:OSError(22, 'Invalid value for `whence`') If Compare op:Lt Raise raises:OSError(22, 'Invalid value for `pos` - must be positive') If Compare op:Eq Return return:yes Assign Assign Assign If Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, rad = 0.0): self.rad = rad", - "docstring": "Parameters ---------- rad : float Curvature of the curve.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:rad Assign" - }, - { - "library": "kornia", - "name": "STEFunction", - "source_code": "class STEFunction(Function): @staticmethod def forward(ctx: Any, input: Tensor, output: Tensor, grad_fn: Optional[Callable[..., Any]] = None) -> Tensor: ctx.in_shape = input.shape ctx.out_shape = output.shape ctx.grad_fn = grad_fn return output @staticmethod def backward(ctx: Any, grad_output: Tensor) -> Tuple[Tensor, Tensor, None]: if ctx.grad_fn is None: return (grad_output.sum_to_size(ctx.in_shape), grad_output.sum_to_size(ctx.out_shape), None) return (ctx.grad_fn(grad_output.sum_to_size(ctx.in_shape)), ctx.grad_fn(grad_output.sum_to_size(ctx.out_shape)), None)", - "docstring": "Straight-Through Estimation (STE) function. STE bridges the gradients between the input tensor and output tensor as if the function was an identity function. Meanwhile, advanced gradient functions are also supported. e.g. the output gradients can be mapped into [-1, 1] with `` estimated from STE. >>> input = torch.randn(4, requires_grad = True) >>> output = torch.sign(input) >>> loss = output.mean() >>> loss.backward() >>> input.grad tensor([0., 0., 0., 0.]) >>> with torch.no_grad(): ... 
output = torch.sign(input) >>> out_est = STEFunction.apply(input, output) >>> loss = out_est.mean() >>> loss.backward() >>> input.grad tensor([0.2500, 0.2500, 0.2500, 0.2500])", - "type": "class", - "file_path": "kornia\\kornia\\grad_estimator\\ste.py", - "ast_data": "ClassDef name:STEFunction FunctionDef name:forward arguments arg:ctx type:Any arg:input type:Tensor arg:output type:Tensor arg:grad_fn type:Optional[Callable[..., Any]] Assign Assign Assign Return return:yes FunctionDef name:backward arguments arg:ctx type:Any arg:grad_output type:Tensor If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "algorithms", - "name": "get_sum", - "source_code": "def get_sum(self, bit_tree, i): s = 0 i = i + 1 while i > 0: s + = bit_tree[i] i - = i & -i return s", - "docstring": "Returns sum of arr[0..index]. This function assumes that the array is preprocessed and partial sums of array elements are stored in bit_tree[].", - "type": "method", - "file_path": "algorithms\\algorithms\\tree\\fenwick_tree\\fenwick_tree.py", - "ast_data": "FunctionDef name:get_sum arguments arg:self arg:bit_tree arg:i Assign Assign While Compare op:Gt Return return:yes" - }, - { - "library": "pygame", - "name": "get_arraytype", - "source_code": "def get_arraytype(): warnings.warn(DeprecationWarning('only numpy arrays are now supported, this function will be removed in a future version of the module')) return 'numpy'", - "docstring": "pygame.surfarray.get_arraytype(): return str DEPRECATED - only numpy arrays are now supported.", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:get_arraytype arguments Return return:yes" - }, - { - "library": "cherrypy", - "name": "tail", - "source_code": "def tail(self, environ, start_response): return self.response_class(environ, start_response, self.cpapp)", - "docstring": "WSGI application callable for the actual CherryPy application. You probably shouldn't call this; call self.__call__ instead, so that any WSGI middleware in self.pipeline can run first.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpwsgi.py", - "ast_data": "FunctionDef name:tail arguments arg:self arg:environ arg:start_response Return return:yes" - }, - { - "library": "pytorch", - "name": "LeakyReLU", - "source_code": "class LeakyReLU(Module): __constants__ = ['inplace', 'negative_slope'] inplace: bool negative_slope: float def __init__(self, negative_slope: float = 0.01, inplace: bool = False) -> None: super().__init__() self.negative_slope = negative_slope self.inplace = inplace def forward(self, input: Tensor) -> Tensor: return F.leaky_relu(input, self.negative_slope, self.inplace) def extra_repr(self) -> str: inplace_str = ', inplace = True' if self.inplace else '' return f'negative_slope = {self.negative_slope}{inplace_str}'", - "docstring": "Applies the LeakyReLU function element-wise. .. math:: \\text{LeakyReLU}(x) = \\max(0, x) + \\text{negative\\_slope} * \\min(0, x) or .. math:: \\text{LeakyReLU}(x) = \\begin{cases} x, & \\text{ if } x \\geq 0 \\\\ \\text{negative\\_slope} \\times x, & \\text{ otherwise } \\end{cases} Args: negative_slope: Controls the angle of the negative slope (which is used for negative input values). Default: 1e-2 inplace: can optionally do the operation in-place. Default: `(*)*(*)`, same shape as the input .. 
image:: ../scripts/activation_images/LeakyReLU.png Examples:: >>> m = nn.LeakyReLU(0.1) >>> input = torch.randn(2) >>> output = m(input)", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\activation.py", - "ast_data": "ClassDef name:LeakyReLU Assign FunctionDef name:__init__ arguments arg:self arg:negative_slope type:float arg:inplace type:bool Assign Assign FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes FunctionDef name:extra_repr arguments arg:self Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "sample", - "source_code": "def sample(self, n_samples = 1): check_is_fitted(self) if n_samples < 1: raise ValueError(\"Invalid value for 'n_samples': %d . The sampling requires at least one sample.\" % self.n_components) _, n_features = self.means_.shape rng = check_random_state(self.random_state) n_samples_comp = rng.multinomial(n_samples, self.weights_) if self.covariance_type = = 'full': X = np.vstack([rng.multivariate_normal(mean, covariance, int(sample)) for mean, covariance, sample in zip(self.means_, self.covariances_, n_samples_comp)]) elif self.covariance_type = = 'tied': X = np.vstack([rng.multivariate_normal(mean, self.covariances_, int(sample)) for mean, sample in zip(self.means_, n_samples_comp)]) else: X = np.vstack([mean + rng.standard_normal(size = (sample, n_features)) * np.sqrt(covariance) for mean, covariance, sample in zip(self.means_, self.covariances_, n_samples_comp)]) y = np.concatenate([np.full(sample, j, dtype = int) for j, sample in enumerate(n_samples_comp)]) return (X, y)", - "docstring": "Generate random samples from the fitted Gaussian distribution. Parameters ---------- n_samples : int, default=1 Number of samples to generate. Returns ------- X : array, shape (n_samples, n_features) Randomly generated sample. y : array, shape (nsamples,) Component labels.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\mixture\\_base.py", - "ast_data": "FunctionDef name:sample arguments arg:self arg:n_samples If Compare op:Lt Raise raises:ValueError(\"Invalid value for 'n_samples': %d . The sampling requires at least one sample.\" % self.n_components) Assign Assign Call call:check_random_state Assign Call call:multinomial If Compare op:Eq Assign Call call:vstack If Compare op:Eq Assign Call call:vstack Assign Call call:vstack Assign Call call:concatenate Return return:yes" - }, - { - "library": "matplotlib", - "name": "StemContainer", - "source_code": "class StemContainer(Container): def __init__(self, markerline_stemlines_baseline, **kwargs): markerline, stemlines, baseline = markerline_stemlines_baseline self.markerline = markerline self.stemlines = stemlines self.baseline = baseline super().__init__(markerline_stemlines_baseline, **kwargs)", - "docstring": "Container for the artists created in a :meth: plot. 
The container can be treated like a namedtuple `~matplotlib.lines.Line2D~matplotlib.collections.LineCollection~matplotlib.lines.Line2D` The artist of the horizontal baseline.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\container.py", - "ast_data": "ClassDef name:StemContainer FunctionDef name:__init__ arguments arg:self arg:markerline_stemlines_baseline kwarg:kwargs Assign Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "statically_known_gt", - "source_code": "def statically_known_gt(self, left: Expr, right: Union[Expr, int]) -> bool: expr = left > right return self.is_expr_static_and_true(expr)", - "docstring": "Returns a bool indicating if it is sound to optimize as if left is greater than right.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\sizevars.py", - "ast_data": "FunctionDef name:statically_known_gt arguments arg:self arg:left type:Expr arg:right type:Union[Expr, int] Assign Compare op:Gt Return return:yes" - }, - { - "library": "matplotlib", - "name": "twinx", - "source_code": "def twinx(ax: matplotlib.axes.Axes | None = None) -> _AxesBase: if ax is None: ax = gca() ax1 = ax.twinx() return ax1", - "docstring": "Make and return a second Axes that shares the *x*-axis. The new Axes will overlay *ax* (or the current Axes if *ax* is *None*), and its ticks will be on the right. Examples -------- :doc:", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", - "ast_data": "FunctionDef name:twinx arguments arg:ax type:matplotlib.axes.Axes | None If Compare op:Is Assign Call call:gca Assign Call call:twinx Return return:yes" - }, - { - "library": "tensorflow", - "name": "register_read_only_resource_op", - "source_code": "def register_read_only_resource_op(op_type): RESOURCE_READ_OPS.add(op_type)", - "docstring": "Declares that does not update its touched resource.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\auto_control_deps_utils.py", - "ast_data": "FunctionDef name:register_read_only_resource_op arguments arg:op_type" - }, - { - "library": "tensorflow", - "name": "constant_value", - "source_code": "def constant_value(pred): if isinstance(pred, tensor.Tensor): return tensor_util.constant_value(pred) if pred in {0, 1}: return bool(pred) if isinstance(pred, bool): return pred if isinstance(pred, variables.Variable): return None raise TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. Found instead: %s' % type(pred))", - "docstring": "Return the bool value for , or None if had a dynamic value. Args: pred: A scalar, either a Python bool or a TensorFlow boolean variable or tensor, or the Python integer 1 or 0. Returns: True or False if has a constant boolean value, None otherwise. Raises: TypeError: If is not a Variable, Tensor or bool, or Python integer 1 or 0.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\control_flow_util.py", - "ast_data": "FunctionDef name:constant_value arguments arg:pred If Call call:isinstance Return return:yes If Compare op:In Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Raise raises:TypeError('`pred` must be a Tensor, or a Python bool, or 1 or 0. Found instead: %s' % type(pred))" - }, - { - "library": "pytorch", - "name": "mark_dirty", - "source_code": "def mark_dirty(self, *args: torch.Tensor): self.dirty_tensors = args", - "docstring": "Mark given tensors as modified in an in-place operation. 
This should be called at most once, in either the :func: or :func: methods, and all arguments should be inputs. Every tensor that's been modified in-place in a call to :func: should be given to this function, to ensure correctness of our checks. It doesn't matter whether the function is called before or after modification. Examples:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_AUTOGRAD) >>> class Inplace(Function): >>> @staticmethod >>> def forward(ctx, x): >>> x_npy = x.numpy() # x_npy shares storage with x >>> x_npy += 1 >>> ctx.mark_dirty(x) >>> return x >>> >>> @staticmethod >>> @once_differentiable >>> def backward(ctx, grad_output): >>> return grad_output >>> >>> a = torch.tensor(1., requires_grad=True, dtype=torch.double).clone() >>> b = a * a >>> Inplace.apply(a) # This would lead to wrong gradients! >>> # but the engine would not know unless we mark_dirty >>> # xdoctest: +SKIP >>> b.backward() # RuntimeError: one of the variables needed for gradient >>> # computation has been modified by an inplace operation", - "type": "method", - "file_path": "pytorch\\torch\\autograd\\function.py", - "ast_data": "FunctionDef name:mark_dirty arguments arg:self vararg:args Assign" - }, - { - "library": "tensorflow", - "name": "async_noop", - "source_code": "def async_noop(name = None): with ops.name_scope(name, 'async_noop') as name: cond_init_value = constant_op.constant(False, name = 'cond_init_value') func_graph_signature = [tensor_spec.TensorSpec(shape = (), dtype = dtypes.bool)] cond_graph = func_graph_module.func_graph_from_py_func('cond_graph', lambda x: x, [cond_init_value], {}, signature = func_graph_signature, func_graph = util.WhileCondFuncGraph('cond_graph', collections = ops.get_default_graph()._collections), add_control_dependencies = False) body_graph = func_graph_module.func_graph_from_py_func('body_graph', lambda x: x, [cond_init_value], {}, signature = func_graph_signature, func_graph = util.WhileBodyFuncGraph('body_graph', collections = ops.get_default_graph()._collections), add_control_dependencies = False) while_op, _ = util.get_op_and_outputs(gen_functional_ops._while([cond_init_value], util.create_new_tf_function(cond_graph), util.create_new_tf_function(body_graph), output_shapes = [[]], name = name)) util.maybe_set_lowering_attr(while_op, lower_using_switch_merge = False) return while_op", - "docstring": "Returns a no-op that is implemented as an async kernel. This operation may be useful to implement \"aggressive inter-op parallelism\" because it will cause any immediate downstream operations to be scheduled on different threads. Args: name: The name of the operation.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py", - "ast_data": "FunctionDef name:async_noop arguments arg:name With Assign Call call:constant Assign Assign Call call:func_graph_from_py_func Assign Call call:func_graph_from_py_func Assign Call call:get_op_and_outputs Return return:yes" - }, - { - "library": "pandas", - "name": "to_frame", - "source_code": "def to_frame(self, name: Hashable = lib.no_default) -> DataFrame: columns: Index if name is lib.no_default: name = self.name if name is None: columns = default_index(1) else: columns = Index([name]) else: columns = Index([name]) mgr = self._mgr.to_2d_mgr(columns) df = self._constructor_expanddim_from_mgr(mgr, axes = mgr.axes) return df.__finalize__(self, method = 'to_frame')", - "docstring": "Convert Series to DataFrame. 
Parameters ---------- name : object, optional The passed name should substitute for the series name (if it has one). Returns ------- DataFrame DataFrame representation of Series. See Also -------- Series.to_dict : Convert Series to dict object. Examples -------- >>> s = pd.Series([\"a\", \"b\", \"c\"], name=\"vals\") >>> s.to_frame() vals 0 a 1 b 2 c", - "type": "method", - "file_path": "pandas\\pandas\\core\\series.py", - "ast_data": "FunctionDef name:to_frame arguments arg:self arg:name type:Hashable If Compare op:Is Assign If Compare op:Is Assign Call call:default_index Assign Call call:Index Assign Call call:Index Assign Call call:to_2d_mgr Assign Call call:_constructor_expanddim_from_mgr Return return:yes" - }, - { - "library": "django", - "name": "make_hashable", - "source_code": "def make_hashable(value): if isinstance(value, dict): return tuple([(key, make_hashable(nested_value)) for key, nested_value in sorted(value.items())]) try: hash(value) except TypeError: if isinstance(value, Iterable): return tuple(map(make_hashable, value)) raise return value", - "docstring": "Attempt to make value hashable or raise a TypeError if it fails. The returned value should generate the same hash for equal values.", - "type": "function", - "file_path": "django\\django\\utils\\hashable.py", - "ast_data": "FunctionDef name:make_hashable arguments arg:value If Call call:isinstance Return return:yes Try ExceptHandler If Call call:isinstance Return return:yes Raise Return return:yes" - }, - { - "library": "pytorch", - "name": "failing", - "source_code": "def failing(self) -> bool: return self = = Status.FAILED_COMPILE or self = = Status.FAILED_RUN_EAGER_EXCEPTION or self = = Status.FAILED_RUN_COMPILE_EXCEPTION or (self = = Status.FAILED_RUN_RETURN)", - "docstring": "Convenience method to check whether these status represent failure.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\fuzzer.py", - "ast_data": "FunctionDef name:failing arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, ax, *args, **kwargs): super().__init__(ax, *args, **kwargs)", - "docstring": "Draw triangular grid contour lines or filled regions, depending on whether keyword arg *filled* is False (default) or True. The first argument of the initializer must be an object. The remaining arguments and keyword arguments are described in the docstring of .", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\tri\\_tricontour.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:ax vararg:args kwarg:kwargs" - }, - { - "library": "pytorch", - "name": "state_dict_hook", - "source_code": "def state_dict_hook(module, destination, prefix, local_metadata): for submodule_name, submodule in module.named_modules(): for attr_name, attr in submodule.__dict__.items(): if isinstance(attr, ShardedTensor): mod_prefix = prefix + submodule_name key = mod_prefix + ('.' 
if mod_prefix else '') + attr_name destination[key] = attr", - "docstring": "Hook to add ShardedTensor to Module's `torch.nn.Module._register_state_dict_hook`.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py", - "ast_data": "FunctionDef name:state_dict_hook arguments arg:module arg:destination arg:prefix arg:local_metadata For Call call:named_modules For Call call:items If Call call:isinstance Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "index_expanded_dims_and_copy_", - "source_code": "def index_expanded_dims_and_copy_(dst: torch.Tensor, src: torch.Tensor, expanded_dims: list[int]) -> None: dst = index_expanded_dims(dst, expanded_dims) src = index_expanded_dims(src, expanded_dims) dst.copy_(src)", - "docstring": "Index into expanded dimensions of both dst and src then copy_", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\compile_fx.py", - "ast_data": "FunctionDef name:index_expanded_dims_and_copy_ arguments arg:dst type:torch.Tensor arg:src type:torch.Tensor arg:expanded_dims type:list[int] Assign Call call:index_expanded_dims Assign Call call:index_expanded_dims" - }, - { - "library": "pytorch", - "name": "extract_tensor_metadata_for_cache_key", - "source_code": "def extract_tensor_metadata_for_cache_key(t: Tensor) -> TensorMetadata: meta = extract_tensor_metadata(t) if not hasattr(t, '_is_inductor_static'): meta = dataclasses.replace(meta, storage_offset = 0, storage_bytes = None) return meta", - "docstring": "Extracts the tensor metadata and removes fields of the TensorMetadata that are not needed for caching", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\codecache.py", - "ast_data": "FunctionDef name:extract_tensor_metadata_for_cache_key arguments arg:t type:Tensor Assign Call call:extract_tensor_metadata If Assign Call call:replace Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_default_locators_and_formatters", - "source_code": "def set_default_locators_and_formatters(self, axis): raise NotImplementedError()", - "docstring": "Set the locators and formatters of *axis* to instances suitable for this scale.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\scale.py", - "ast_data": "FunctionDef name:set_default_locators_and_formatters arguments arg:self arg:axis Raise raises:NotImplementedError()" - }, - { - "library": "kornia", - "name": "gaussian", - "source_code": "def gaussian(window_size: int, sigma: Tensor | float, *, mean: Optional[Union[Tensor, float]] = None, device: Optional[Device] = None, dtype: Optional[Dtype] = None) -> Tensor: if isinstance(sigma, float): sigma = tensor([[sigma]], device = device, dtype = dtype) KORNIA_CHECK_IS_TENSOR(sigma) KORNIA_CHECK_SHAPE(sigma, ['B', '1']) batch_size = sigma.shape[0] mean = float(window_size // 2) if mean is None else mean if isinstance(mean, float): mean = tensor([[mean]], device = sigma.device, dtype = sigma.dtype) KORNIA_CHECK_IS_TENSOR(mean) KORNIA_CHECK_SHAPE(mean, ['B', '1']) x = (torch.arange(window_size, device = sigma.device, dtype = sigma.dtype) - mean).expand(batch_size, -1) if window_size % 2 = = 0: x = x + 0.5 gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0))) return gauss / gauss.sum(-1, keepdim = True)", - "docstring": "Compute the gaussian values based on the window and sigma values. Args: window_size: the size which drives the filter amount. sigma: gaussian standard deviation. 
If a tensor, should be in a shape :math: mean: Mean of the Gaussian function (center). If not provided, it defaults to window_size // 2. If a tensor, should be in a shape :math: device: This value will be used if sigma is a float. Device desired to compute. dtype: This value will be used if sigma is a float. Dtype desired for compute. Returns: A tensor withshape :math:, with Gaussian values.", - "type": "function", - "file_path": "kornia\\kornia\\filters\\kernels.py", - "ast_data": "FunctionDef name:gaussian arguments arg:window_size type:int arg:sigma type:Tensor | float If Call call:isinstance Assign Call call:tensor Assign Assign If Call call:isinstance Assign Call call:tensor Assign Call call:expand If Compare op:Eq Assign Assign Call call:exp Return return:yes" - }, - { - "library": "kornia", - "name": "save", - "source_code": "def save(self, images: Tensor, depth_maps: Optional[Union[Tensor, list[Tensor]]] = None, directory: Optional[str] = None, output_type: str = 'torch', depth_type: str = 'relative', max_depth: int = 80) -> None: outputs = self.visualize(images, depth_maps, output_type, depth_type = depth_type, max_depth = max_depth) self._save_outputs(images, directory, suffix = '_src') self._save_outputs(outputs, directory, suffix = '_depth')", - "docstring": "Save the segmentation results. Args: images: input tensor. depth_maps: estimated depths. output_type: type of the output. depth_type: 'metric' or 'relative' depth. max_depth: maximum depth value. Only valid for metric depth. directory: where to store outputs. Returns: output tensor.", - "type": "method", - "file_path": "kornia\\kornia\\models\\depth_estimation\\base.py", - "ast_data": "FunctionDef name:save arguments arg:self arg:images type:Tensor arg:depth_maps type:Optional[Union[Tensor, list[Tensor]]] arg:directory type:Optional[str] arg:output_type type:str arg:depth_type type:str arg:max_depth type:int Assign Call call:visualize" - }, - { - "library": "pytorch", - "name": "from_float", - "source_code": "@classmethod def from_float(cls, mod, use_precomputed_fake_quant = False): float_modules = [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear, torch.ao.nn.intrinsic.modules.fused.LinearReLU, torch.ao.nn.qat.dynamic.Linear] assert type(mod) in float_modules, 'nn.quantized.dynamic.Linear.from_float only works for one of' + str([float_mod.__name__ for float_mod in float_modules]) assert hasattr(mod, 'qconfig'), 'Input float module must have qconfig defined' if type(mod) = = nni.LinearReLU: mod = mod[0] if mod.qconfig is not None and mod.qconfig.weight is not None: weight_observer = mod.qconfig.weight() else: from torch.ao.quantization.qconfig import default_dynamic_qconfig weight_observer = default_dynamic_qconfig.weight() dtype = weight_observer.dtype assert dtype in [torch.qint8, torch.float16], f'The only supported dtypes for dynamic quantized linear are qint8 and float16 got: {dtype}' weight_observer(mod.weight) if dtype = = torch.qint8: qweight = _quantize_weight(mod.weight.float(), weight_observer) elif dtype = = torch.float16: qweight = mod.weight.float() else: raise RuntimeError('Unsupported dtype specified for dynamic quantized Linear!') qlinear = cls(mod.in_features, mod.out_features, dtype = dtype) qlinear.set_weight_bias(qweight, mod.bias) return qlinear", - "docstring": "Create a dynamic quantized module from a float module or qparams_dict Args: mod (Module): a float module, either produced by torch.ao.quantization utilities or provided by the user", - "type": "method", - "file_path": 
"pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\linear.py", - "ast_data": "FunctionDef name:from_float arguments arg:cls arg:mod arg:use_precomputed_fake_quant Assign If Compare op:Eq Assign If BoolOp Compare op:IsNot Compare op:IsNot Assign Call call:weight Assign Call call:weight Assign If Compare op:Eq Assign Call call:_quantize_weight If Compare op:Eq Assign Call call:float Raise raises:RuntimeError('Unsupported dtype specified for dynamic quantized Linear!') Assign Call call:cls Return return:yes" - }, - { - "library": "kornia", - "name": "trans_y", - "source_code": "@classmethod def trans_y(cls, y: Tensor) -> Se2: zs = zeros_like(y) return cls.trans(zs, y)", - "docstring": "Construct a y-axis translation. Args: y: the y-axis translation.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se2.py", - "ast_data": "FunctionDef name:trans_y arguments arg:cls arg:y type:Tensor Assign Call call:zeros_like Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_bbox_edge_pos", - "source_code": "@staticmethod def get_bbox_edge_pos(bbox, loc): x0, y0, x1, y1 = bbox.extents if loc = = 1: return (x1, y1) elif loc = = 2: return (x0, y1) elif loc = = 3: return (x0, y0) elif loc = = 4: return (x1, y0)", - "docstring": "Return the `.BboxConnector` constructor.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py", - "ast_data": "FunctionDef name:get_bbox_edge_pos arguments arg:bbox arg:loc Assign If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes" - }, - { - "library": "kornia", - "name": "extract_patches_simple", - "source_code": "def extract_patches_simple(img: Tensor, laf: Tensor, PS: int = 32, normalize_lafs_before_extraction: bool = True) -> Tensor: KORNIA_CHECK_LAF(laf) if normalize_lafs_before_extraction: nlaf = normalize_laf(laf, img) else: nlaf = laf _, ch, h, w = img.size() B, N, _, _ = laf.size() out = [] for i in range(B): grid = generate_patch_grid_from_normalized_LAF(img[i: i + 1], nlaf[i: i + 1], PS).to(img.device) out.append(F.grid_sample(img[i: i + 1].expand(grid.size(0), ch, h, w), grid, padding_mode = 'border', align_corners = False)) return concatenate(out, dim = 0).view(B, N, ch, PS, PS)", - "docstring": "Extract patches defined by LAFs from image tensor. No smoothing applied, huge aliasing (better use extract_patches_from_pyramid). Args: img: images, LAFs are detected in :math:. laf: :math:. PS: patch size. normalize_lafs_before_extraction: if True, lafs are normalized to image size. 
Returns: patches with shape :math:.", - "type": "function", - "file_path": "kornia\\kornia\\feature\\laf.py", - "ast_data": "FunctionDef name:extract_patches_simple arguments arg:img type:Tensor arg:laf type:Tensor arg:PS type:int arg:normalize_lafs_before_extraction type:bool If Assign Call call:normalize_laf Assign Assign Call call:size Assign Call call:size Assign For Call call:range Assign Call call:to Return return:yes" - }, - { - "library": "pandas", - "name": "validate_parse_dates_presence", - "source_code": "def validate_parse_dates_presence(parse_dates: bool | list, columns: Sequence[Hashable]) -> set: if not isinstance(parse_dates, list): return set() missing = set() unique_cols = set() for col in parse_dates: if isinstance(col, str): if col not in columns: missing.add(col) else: unique_cols.add(col) elif col in columns: unique_cols.add(col) else: unique_cols.add(columns[col]) if missing: missing_cols = ', '.join(sorted(missing)) raise ValueError(f\"Missing column provided to 'parse_dates': '{missing_cols}'\") return unique_cols", - "docstring": "Check if parse_dates are in columns. If user has provided names for parse_dates, check if those columns are available. Parameters ---------- columns : list List of names of the dataframe. Returns ------- The names of the columns which will get parsed later if a list is given as specification. Raises ------ ValueError If column to parse_date is not in dataframe.", - "type": "function", - "file_path": "pandas\\pandas\\io\\parsers\\base_parser.py", - "ast_data": "FunctionDef name:validate_parse_dates_presence arguments arg:parse_dates type:bool | list arg:columns type:Sequence[Hashable] If Return return:yes Assign Call call:set Assign Call call:set For If Call call:isinstance If Compare op:NotIn If Compare op:In If Assign Call call:join Raise raises:ValueError(f\"Missing column provided to 'parse_dates': '{missing_cols}'\") Return return:yes" - }, - { - "library": "mongo", - "name": "add_replace", - "source_code": "def add_replace(self, selector: Mapping[str, Any], replacement: Mapping[str, Any], upsert: Optional[bool], collation: Optional[Mapping[str, Any]] = None, hint: Union[str, dict[str, Any], None] = None, sort: Optional[Mapping[str, Any]] = None) -> None: validate_ok_for_replace(replacement) cmd: dict[str, Any] = {'q': selector, 'u': replacement} if upsert is not None: cmd['upsert'] = upsert if collation is not None: self.uses_collation = True cmd['collation'] = collation if hint is not None: self.uses_hint_update = True cmd['hint'] = hint if sort is not None: self.uses_sort = True cmd['sort'] = sort self.ops.append((_UPDATE, cmd))", - "docstring": "Create a replace document and add it to the list of ops.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\bulk.py", - "ast_data": "FunctionDef name:add_replace arguments arg:self arg:selector type:Mapping[str, Any] arg:replacement type:Mapping[str, Any] arg:upsert type:Optional[bool] arg:collation type:Optional[Mapping[str, Any]] arg:hint type:Union[str, dict[str, Any], None] arg:sort type:Optional[Mapping[str, Any]] If Compare op:IsNot Assign If Compare op:IsNot Assign Assign If Compare op:IsNot Assign Assign If Compare op:IsNot Assign Assign" - }, - { - "library": "pytorch", - "name": "autocast", - "source_code": "class autocast(torch.amp.autocast_mode.autocast): @deprecated(\"`torch.cpu.amp.autocast(args...)` is deprecated. 
Please use `torch.amp.autocast('cpu', args...)` instead.\", category = FutureWarning) def __init__(self, enabled: bool = True, dtype: torch.dtype = torch.bfloat16, cache_enabled: bool = True): if torch._jit_internal.is_scripting(): self._enabled = enabled self.device = 'cpu' self.fast_dtype = dtype return super().__init__('cpu', enabled = enabled, dtype = dtype, cache_enabled = cache_enabled) def __enter__(self): if torch._jit_internal.is_scripting(): return self return super().__enter__() def __exit__(self, exc_type: Any, exc_val: Any, exc_tb: Any): if torch._jit_internal.is_scripting(): return return super().__exit__(exc_type, exc_val, exc_tb) def __call__(self, func): if torch._jit_internal.is_scripting(): return func return super().__call__(func)", - "docstring": "See :class:. `` instead.", - "type": "class", - "file_path": "pytorch\\torch\\cpu\\amp\\autocast_mode.py", - "ast_data": "ClassDef name:autocast FunctionDef name:__init__ arguments arg:self arg:enabled type:bool arg:dtype type:torch.dtype arg:cache_enabled type:bool Call call:deprecated If Call call:is_scripting Assign Assign Assign Return return:no FunctionDef name:__enter__ arguments arg:self If Call call:is_scripting Return return:yes Return return:yes FunctionDef name:__exit__ arguments arg:self arg:exc_type type:Any arg:exc_val type:Any arg:exc_tb type:Any If Call call:is_scripting Return return:no Return return:yes FunctionDef name:__call__ arguments arg:self arg:func If Call call:is_scripting Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_pagecount", - "source_code": "def get_pagecount(self): return self._n_figures", - "docstring": "Return the current number of pages in the multipage pdf file.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py", - "ast_data": "FunctionDef name:get_pagecount arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "RawToRgb2x2Downscaled", - "source_code": "class RawToRgb2x2Downscaled(Module): def __init__(self, cfa: CFA) -> None: super().__init__() self.cfa = cfa def forward(self, image: Tensor) -> Tensor: return raw_to_rgb_2x2_downscaled(image, cfa = self.cfa)", - "docstring": "Module version of the :func: function. The image width and height have to be divisible by two. The image data is assumed to be in the range of (0, 1). Shape: - image: :math: - output: :math: Example: >>> rawinput = torch.rand(2, 1, 4, 6) >>> rgb_downscale = RawToRgb2x2Downscaled(CFA.RG) >>> output = rgb_downscale(rawinput) # 2x3x2x3", - "type": "class", - "file_path": "kornia\\kornia\\color\\raw.py", - "ast_data": "ClassDef name:RawToRgb2x2Downscaled FunctionDef name:__init__ arguments arg:self arg:cfa type:CFA Assign FunctionDef name:forward arguments arg:self arg:image type:Tensor Return return:yes" - }, - { - "library": "pandas", - "name": "mean", - "source_code": "def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0): if isinstance(self.dtype, PeriodDtype): raise TypeError(f\"mean is not implemented for {type(self).__name__} since the meaning is ambiguous. An alternative is obj.to_timestamp(how = 'start').mean()\") result = nanops.nanmean(self._ndarray, axis = axis, skipna = skipna, mask = self.isna()) return self._wrap_reduction_result(axis, result)", - "docstring": "Return the mean value of the Array. Parameters ---------- skipna : bool, default True Whether to ignore any NaT elements. axis : int, optional, default 0 Axis for the function to be applied on. 
Returns ------- scalar Timestamp or Timedelta. See Also -------- numpy.ndarray.mean : Returns the average of array elements along a given axis. Series.mean : Return the mean value in a Series. Notes ----- mean is only defined for Datetime and Timedelta dtypes, not for Period. Examples -------- For :class:: >>> idx = pd.date_range(\"2001-01-01 00:00\", periods=3) >>> idx DatetimeIndex(['2001-01-01', '2001-01-02', '2001-01-03'], dtype='datetime64[ns]', freq='D') >>> idx.mean() Timestamp('2001-01-02 00:00:00') For :class:: >>> tdelta_idx = pd.to_timedelta([1, 2, 3], unit=\"D\") >>> tdelta_idx TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq=None) >>> tdelta_idx.mean() Timedelta('2 days 00:00:00')", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\datetimelike.py", - "ast_data": "FunctionDef name:mean arguments arg:self If Call call:isinstance Raise raises:TypeError(f\"mean is not implemented for {type(self).__name__} since the meaning is ambiguous. An alternative is obj.to_timestamp(how='start').mean()\") Assign Call call:nanmean Return return:yes" - }, - { - "library": "sphinx", - "name": "create_nojekyll_and_cname", - "source_code": "def create_nojekyll_and_cname(app: Sphinx, env: BuildEnvironment) -> None: if app.builder.format ! = 'html': return app.builder.outdir.joinpath('.nojekyll').touch() cname_path = app.builder.outdir / 'CNAME' domain = _get_domain_from_url(app.config.html_baseurl) if domain and (not domain.endswith('.github.io')): with open(cname_path, 'w', encoding = 'utf-8') as f: f.write(domain) else: cname_path.unlink(missing_ok = True)", - "docstring": "Manage the `html_baseurl` files from the output directory.", - "type": "function", - "file_path": "sphinx\\sphinx\\ext\\githubpages.py", - "ast_data": "FunctionDef name:create_nojekyll_and_cname arguments arg:app type:Sphinx arg:env type:BuildEnvironment If Compare op:NotEq Return return:no Assign Assign Call call:_get_domain_from_url If BoolOp With" - }, - { - "library": "tensorflow", - "name": "__call__", - "source_code": "def __call__(self, shape, dtype = None, **kwargs): self._validate_kwargs(kwargs, support_partition = self.support_partition) if dtype is not None: dtype = dtypes.as_dtype(dtype) return constant_op.constant(self.value, dtype = dtype, shape = shape)", - "docstring": "Returns a tensor object initialized as specified by the initializer. Args: shape: Shape of the tensor. dtype: Optional dtype of the tensor. If not provided the dtype of the tensor created will be the type of the inital value. **kwargs: Additional keyword arguments. 
Raises: TypeError: If the initializer cannot create a tensor of the requested dtype.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\init_ops_v2.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:shape arg:dtype kwarg:kwargs If Compare op:IsNot Assign Call call:as_dtype Return return:yes" - }, - { - "library": "pytorch", - "name": "RemoteCacheBackend", - "source_code": "class RemoteCacheBackend(Generic[_T]): def __init__(self) -> None: self._name = f'backend: {type(self).__name__}' @abstractmethod def _get(self, key: str) -> Optional[_T]: pass @abstractmethod def _put(self, key: str, data: _T) -> None: pass def get(self, key: str) -> Optional[_T]: try: value = self._get(key) cache_stats.get(self._name, value) except Exception: cache_stats.exception(self._name) raise return value def put(self, key: str, data: _T) -> None: try: self._put(key, data) cache_stats.put(self._name) except Exception: cache_stats.exception(self._name) raise", - "docstring": "A backend implementation for accessing a remote/distributed cache. Only works with bytes in/out. For structured data use a RemoteCache.", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\remote_cache.py", - "ast_data": "ClassDef name:RemoteCacheBackend FunctionDef name:__init__ arguments arg:self Assign FunctionDef name:_get arguments arg:self arg:key type:str FunctionDef name:_put arguments arg:self arg:key type:str arg:data type:_T FunctionDef name:get arguments arg:self arg:key type:str Try Assign Call call:_get ExceptHandler Raise Return return:yes FunctionDef name:put arguments arg:self arg:key type:str arg:data type:_T Try ExceptHandler Raise" - }, - { - "library": "schema", - "name": "validate", - "source_code": "def validate(self, data: Any, **kwargs: Any) -> Any: autos: List[str] = [] errors: List[Union[str, None]] = [] for sub_schema in self._build_schemas(): try: validation: Any = sub_schema.validate(data, **kwargs) self.match_count + = 1 if self.match_count > 1 and self.only_one: break return validation except SchemaError as _x: autos + = _x.autos errors + = _x.errors raise SchemaError(['%r did not validate %r' % (self, data)] + autos, [self._error.format(data) if self._error else None] + errors)", - "docstring": "Validate data using sub defined schema/expressions ensuring at least one value is valid. :param data: data to be validated by provided schema. 
:return: return validated data if not validation", - "type": "method", - "file_path": "schema\\schema\\__init__.py", - "ast_data": "FunctionDef name:validate arguments arg:self arg:data type:Any kwarg:kwargs For Call call:_build_schemas Try If BoolOp Compare op:Gt Return return:yes ExceptHandler Raise raises:SchemaError(['%r did not validate %r' % (self, data)] + autos, [self._error.format(data) if self._error else None] + errors)" - }, - { - "library": "matplotlib", - "name": "home", - "source_code": "def home(self): self.views[self.figure].home() self.positions[self.figure].home()", - "docstring": "Recall the first view and position from the stack.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", - "ast_data": "FunctionDef name:home arguments arg:self" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, cache_staged_state_dict: bool = False, type_check: bool = False): self.cache_staged_state_dict = cache_staged_state_dict self.type_check = type_check self.state_dict_cache: Optional[STATE_DICT_TYPE] = None", - "docstring": "Initializes the BlockingAsyncStager. Args: cache_staged_state_dict: Whether to cache the staged state_dict. This option decreases staging latency at the cost of increased memory usage. Additionally, if this parameter is set to True, it's the expectation that the stager is maintained and re-used for multiple dcp.async_save calls. Default to False. type_check: Whether to perform a type check during cpu_offload. Defaults to False.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\checkpoint\\staging.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:cache_staged_state_dict type:bool arg:type_check type:bool Assign Assign" - }, - { - "library": "tensorflow", - "name": "transform_feature", - "source_code": "def transform_feature(self, transformation_cache, state_manager): source_tensor = transformation_cache.get(self.source_column, state_manager) return math_ops._bucketize(source_tensor, boundaries = self.boundaries)", - "docstring": "Returns bucketized categorical tensor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", - "ast_data": "FunctionDef name:transform_feature arguments arg:self arg:transformation_cache arg:state_manager Assign Call call:get Return return:yes" - }, - { - "library": "tensorflow", - "name": "clipvalue", - "source_code": "@property def clipvalue(self): return self._clipvalue", - "docstring": "or . If set, clips gradients to a maximum value.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\optimizer_v2.py", - "ast_data": "FunctionDef name:clipvalue arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "file_complete", - "source_code": "def file_complete(self, file_size): raise NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method')", - "docstring": "Signal that a file has completed. File size corresponds to the actual size accumulated by all the chunks. 
Subclasses should return a valid `` object.", - "type": "method", - "file_path": "django\\django\\core\\files\\uploadhandler.py", - "ast_data": "FunctionDef name:file_complete arguments arg:self arg:file_size Raise raises:NotImplementedError('subclasses of FileUploadHandler must provide a file_complete() method')" - }, - { - "library": "numpy", - "name": "get_npy_pkg_dir", - "source_code": "def get_npy_pkg_dir(): d = os.environ.get('NPY_PKG_CONFIG_PATH') if d is not None: return d spec = importlib.util.find_spec('numpy') d = os.path.join(os.path.dirname(spec.origin), '_core', 'lib', 'npy-pkg-config') return d", - "docstring": "Return the path where to find the npy-pkg-config directory. If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that is returned. Otherwise, a path inside the location of the numpy module is returned. The NPY_PKG_CONFIG_PATH can be useful when cross-compiling, maintaining customized npy-pkg-config .ini files for the cross-compilation environment, and using them when cross-compiling.", - "type": "function", - "file_path": "numpy\\numpy\\distutils\\misc_util.py", - "ast_data": "FunctionDef name:get_npy_pkg_dir arguments Assign Call call:get If Compare op:IsNot Return return:yes Assign Call call:find_spec Assign Call call:join Return return:yes" - }, - { - "library": "tensorflow", - "name": "clear_op_callbacks", - "source_code": "def clear_op_callbacks(): for callback in context.context().op_callbacks: remove_op_callback(callback)", - "docstring": "Clear all op callbacks registered in the current thread.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\op_callbacks.py", - "ast_data": "FunctionDef name:clear_op_callbacks arguments For" - }, - { - "library": "kornia", - "name": "scale", - "source_code": "def scale(self, scale_factor: Tensor) -> 'PinholeCamera': intrinsics: Tensor = self.intrinsics.clone() intrinsics[..., 0, 0] * = scale_factor intrinsics[..., 1, 1] * = scale_factor intrinsics[..., 0, 2] * = scale_factor intrinsics[..., 1, 2] * = scale_factor height: Tensor = scale_factor * self.height.clone() width: Tensor = scale_factor * self.width.clone() return PinholeCamera(intrinsics, self.extrinsics, height, width)", - "docstring": "Scale the pinhole model. Args: scale_factor: a tensor with the scale factor. It has to be broadcastable with class members. The expected shape is :math: or :math:. Returns: the camera model with scaled parameters.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", - "ast_data": "FunctionDef name:scale arguments arg:self arg:scale_factor type:Tensor Return return:yes" - }, - { - "library": "scipy", - "name": "ZerosPolesGainDiscrete", - "source_code": "class ZerosPolesGainDiscrete(ZerosPolesGain, dlti): pass", - "docstring": "Discrete-time Linear Time Invariant system in zeros, poles, gain form. Represents the system as the discrete-time transfer function :math:, where :math: is the , :math: are the and :math: are the . Discrete-time systems inherit additional functionality from the class. Parameters ---------- *system : arguments The class can be instantiated with 1 or 3 arguments. The following gives the number of input arguments and their interpretation: * 1: system: (, or ) * 3: array_like: (zeros, poles, gain) dt: float, optional Sampling time [s] of the discrete-time systems. Defaults to (unspecified sampling time). 
Must be specified as a keyword argument, for example, `ZerosPolesGainABCDH(s) = \\frac{5(s - 1)(s - 2)}{(s - 3)(s - 4)}H(z) = \\frac{5(z - 1)(z - 2)}{(z - 3)(z - 4)}` with a sampling time of 0.1 seconds: >>> signal.ZerosPolesGain([1, 2], [3, 4], 5, dt=0.1) ZerosPolesGainDiscrete( array([1, 2]), array([3, 4]), 5, dt: 0.1 )", - "type": "class", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "ClassDef name:ZerosPolesGainDiscrete" - }, - { - "library": "scrapy", - "name": "load", - "source_code": "def load(self, spider_name: str) -> type[Spider]: pass", - "docstring": "Return the Spider class for the given spider name. If the spider name is not found, it must raise a KeyError.", - "type": "method", - "file_path": "scrapy\\scrapy\\spiderloader.py", - "ast_data": "FunctionDef name:load arguments arg:self arg:spider_name type:str" - }, - { - "library": "tensorflow", - "name": "assert_no_leak_if_all_possibly_except_one", - "source_code": "@trace.trace_wrapper def assert_no_leak_if_all_possibly_except_one(self): self._python_memory_checker.assert_no_leak_if_all_possibly_except_one()", - "docstring": "Raises an exception if a leak is detected. This algorithm classifies a series of allocations as a leak if it's the same type(Python) or it happens at the same stack trace(C++) at every snapshot, but possibly except one snapshot.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\memory_checker.py", - "ast_data": "FunctionDef name:assert_no_leak_if_all_possibly_except_one arguments arg:self" - }, - { - "library": "scipy", - "name": "band_stop_obj", - "source_code": "def band_stop_obj(wp, ind, passb, stopb, gpass, gstop, type): _validate_gpass_gstop(gpass, gstop) passbC = passb.copy() passbC[ind] = wp nat = stopb * (passbC[0] - passbC[1]) / (stopb ** 2 - passbC[0] * passbC[1]) nat = min(abs(nat)) if type = = 'butter': GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) n = log10((GSTOP - 1.0) / (GPASS - 1.0)) / (2 * log10(nat)) elif type = = 'cheby': GSTOP = 10 ** (0.1 * abs(gstop)) GPASS = 10 ** (0.1 * abs(gpass)) n = arccosh(sqrt((GSTOP - 1.0) / (GPASS - 1.0))) / arccosh(nat) elif type = = 'ellip': GSTOP = 10 ** (0.1 * gstop) GPASS = 10 ** (0.1 * gpass) arg1 = sqrt((GPASS - 1.0) / (GSTOP - 1.0)) arg0 = 1.0 / nat d0 = special.ellipk([arg0 ** 2, 1 - arg0 ** 2]) d1 = special.ellipk([arg1 ** 2, 1 - arg1 ** 2]) n = d0[0] * d1[1] / (d0[1] * d1[0]) else: raise ValueError(f'Incorrect type: {type}') return n", - "docstring": "Band Stop Objective Function for order minimization. Returns the non-integer order for an analog band stop filter. Parameters ---------- wp : scalar Edge of passband . ind : int, {0, 1} Index specifying which edge to vary (0 or 1). passb : ndarray Two element sequence of fixed passband edges. stopb : ndarray Two element sequence of fixed stopband edges. gstop : float Amount of attenuation in stopband in dB. gpass : float Amount of ripple in the passband in dB. type : {'butter', 'cheby', 'ellip'} Type of filter. Returns ------- n : scalar Filter order (possibly non-integer). Notes ----- Band-stop filters are used in applications where certain frequency components need to be blocked while others are allowed; for instance, removing noise at specific frequencies while allowing the desired signal to pass through. The order of a filter often determines its complexity and accuracy. Determining the right order can be a challenge. This function aims to provide an appropriate order for an analog band stop filter. 
Examples -------- >>> import numpy as np >>> from scipy.signal import band_stop_obj >>> wp = 2 >>> ind = 1 >>> passb = np.array([1, 3]) >>> stopb = np.array([0.5, 4]) >>> gstop = 30 >>> gpass = 3 >>> filter_type = 'butter' >>> band_stop_obj(wp, ind, passb, stopb, gpass, gstop, filter_type) np.float64(-2.758504160760643)", - "type": "function", - "file_path": "scipy\\scipy\\signal\\_filter_design.py", - "ast_data": "FunctionDef name:band_stop_obj arguments arg:wp arg:ind arg:passb arg:stopb arg:gpass arg:gstop arg:type Assign Call call:copy Assign Assign Assign Call call:min If Compare op:Eq Assign Assign Assign If Compare op:Eq Assign Assign Assign If Compare op:Eq Assign Assign Assign Call call:sqrt Assign Assign Call call:ellipk Assign Call call:ellipk Assign Raise raises:ValueError(f'Incorrect type: {type}') Return return:yes" - }, - { - "library": "scipy", - "name": "step", - "source_code": "def step(self, X0 = None, T = None, N = None): return step(self, X0 = X0, T = T, N = N)", - "docstring": "Return the step response of a continuous-time system. See for details.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:step arguments arg:self arg:X0 arg:T arg:N Return return:yes" - }, - { - "library": "tensorflow", - "name": "pid", - "source_code": "@property def pid(self) -> int: return self._pid", - "docstring": "ID of the process which created this tensor (an integer).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", - "ast_data": "FunctionDef name:pid arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "shard", - "source_code": "@torch.no_grad() def shard(self): flat_param = self.flat_param if not self.uses_sharded_strategy: self._init_shard_metadata(0, 0, flat_param.numel() - 1) else: _p_assert(flat_param.storage_offset() = = 0, 'The `FlatParameter` is not the sole occupant of its storage') sharded_flat_param, numel_padded = FlatParamHandle._get_shard(flat_param, self.rank, self.world_size) if not torch.distributed._functional_collectives.is_torchdynamo_compiling(): allocated = flat_param._typed_storage()._size() > 0 if allocated: flat_param._typed_storage()._resize_(0) flat_param.set_(sharded_flat_param) start_idx = sharded_flat_param.numel() * self.rank end_idx = sharded_flat_param.numel() * (self.rank + 1) - 1 self._init_shard_metadata(numel_padded, start_idx, end_idx) if self._use_orig_params: self._use_sharded_views()", - "docstring": "Shard the handle's `` is the sharded flat parameter. 
Shard metadata attributes are set for all sharding strategies.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\_flat_param.py", - "ast_data": "FunctionDef name:shard arguments arg:self Call call:no_grad Assign If Assign Call call:_get_shard If Assign Compare op:Gt If Assign Assign If" - }, - { - "library": "django", - "name": "make_debug_cursor", - "source_code": "def make_debug_cursor(self, cursor): return utils.CursorDebugWrapper(cursor, self)", - "docstring": "Create a cursor that logs all queries in self.queries_log.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\base.py", - "ast_data": "FunctionDef name:make_debug_cursor arguments arg:self arg:cursor Return return:yes" - }, - { - "library": "scipy", - "name": "pre_order", - "source_code": "def pre_order(self, func = lambda x: x.id): n = self.count curNode = [None] * (2 * n) lvisited = set() rvisited = set() curNode[0] = self k = 0 preorder = [] while k > = 0: nd = curNode[k] ndid = nd.id if nd.is_leaf(): preorder.append(func(nd)) k = k - 1 elif ndid not in lvisited: curNode[k + 1] = nd.left lvisited.add(ndid) k = k + 1 elif ndid not in rvisited: curNode[k + 1] = nd.right rvisited.add(ndid) k = k + 1 else: k = k - 1 return preorder", - "docstring": "Perform pre-order traversal without recursive function calls. When a leaf node is first encountered, ``. If not provided, the index of the original observation to which the node corresponds is used. Returns ------- L : list The pre-order traversal.", - "type": "method", - "file_path": "scipy\\scipy\\cluster\\hierarchy.py", - "ast_data": "FunctionDef name:pre_order arguments arg:self arg:func Assign Assign Assign Call call:set Assign Call call:set Assign Assign Assign While Compare op:GtE Assign Assign If Call call:is_leaf Assign If Compare op:NotIn Assign Assign If Compare op:NotIn Assign Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "save_base", - "source_code": "def save_base(self, raw = False, force_insert = False, force_update = False, using = None, update_fields = None): using = using or router.db_for_write(self.__class__, instance = self) assert not (force_insert and (force_update or update_fields)) assert update_fields is None or update_fields cls = origin = self.__class__ if cls._meta.proxy: cls = cls._meta.concrete_model meta = cls._meta if not meta.auto_created: pre_save.send(sender = origin, instance = self, raw = raw, using = using, update_fields = update_fields) if meta.parents: context_manager = transaction.atomic(using = using, savepoint = False) else: context_manager = transaction.mark_for_rollback_on_error(using = using) with context_manager: parent_inserted = False if not raw: force_insert = self._validate_force_insert(force_insert) parent_inserted = self._save_parents(cls, using, update_fields, force_insert) updated = self._save_table(raw, cls, force_insert or parent_inserted, force_update, using, update_fields) self._state.db = using self._state.adding = False if not meta.auto_created: post_save.send(sender = origin, instance = self, created = not updated, update_fields = update_fields, raw = raw, using = using)", - "docstring": "Handle the parts of saving which should be done only once per save, yet need to be done in raw saves, too. This includes some sanity checks and signal sending. The 'raw' argument is telling save_base not to save any parent models and not to do any changes to the values before save. 
This is used by fixture loading.", - "type": "method", - "file_path": "django\\django\\db\\models\\base.py", - "ast_data": "FunctionDef name:save_base arguments arg:self arg:raw arg:force_insert arg:force_update arg:using arg:update_fields Assign BoolOp Call call:db_for_write Assign If Assign Assign If If Assign Call call:atomic Assign Call call:mark_for_rollback_on_error With Assign If Assign Call call:_validate_force_insert Assign Call call:_save_parents Assign Call call:_save_table Assign Assign If" - }, - { - "library": "tensorflow", - "name": "IteratorBase", - "source_code": "@tf_export('data.Iterator', v1 = []) class IteratorBase(collections_abc.Iterator, trackable.Trackable, composite_tensor.CompositeTensor, metaclass = abc.ABCMeta): @abc.abstractproperty def element_spec(self): raise NotImplementedError('Iterator.element_spec') @abc.abstractmethod def get_next(self): raise NotImplementedError('Iterator.get_next()') @abc.abstractmethod def get_next_as_optional(self): raise NotImplementedError('Iterator.get_next_as_optional()')", - "docstring": "Represents an iterator of a . is the primary mechanism for enumerating elements of a . It supports the Python Iterator protocol, which means it can be iterated over using a for-loop: >>> dataset = tf.data.Dataset.range(2) >>> for element in dataset: ... print(element) tf.Tensor(0, shape=(), dtype=int64) tf.Tensor(1, shape=(), dtype=int64) or by fetching individual elements explicitly via : >>> dataset = tf.data.Dataset.range(2) >>> iterator = iter(dataset) >>> print(iterator.get_next()) tf.Tensor(0, shape=(), dtype=int64) >>> print(iterator.get_next()) tf.Tensor(1, shape=(), dtype=int64) In addition, non-raising iteration is supported via , which returns the next element (if available) wrapped in a . >>> dataset = tf.data.Dataset.from_tensors(42) >>> iterator = iter(dataset) >>> optional = iterator.get_next_as_optional() >>> print(optional.has_value()) tf.Tensor(True, shape=(), dtype=bool) >>> optional = iterator.get_next_as_optional() >>> print(optional.has_value()) tf.Tensor(False, shape=(), dtype=bool)", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", - "ast_data": "ClassDef name:IteratorBase Call call:tf_export FunctionDef name:element_spec arguments arg:self Raise raises:NotImplementedError('Iterator.element_spec') FunctionDef name:get_next arguments arg:self Raise raises:NotImplementedError('Iterator.get_next()') FunctionDef name:get_next_as_optional arguments arg:self Raise raises:NotImplementedError('Iterator.get_next_as_optional()')" - }, - { - "library": "pytorch", - "name": "LogNormal", - "source_code": "class LogNormal(TransformedDistribution): arg_constraints = {'loc': constraints.real, 'scale': constraints.positive} support = constraints.positive has_rsample = True base_dist: Normal def __init__(self, loc: Union[Tensor, float], scale: Union[Tensor, float], validate_args: Optional[bool] = None) -> None: base_dist = Normal(loc, scale, validate_args = validate_args) super().__init__(base_dist, ExpTransform(), validate_args = validate_args) def expand(self, batch_shape, _instance = None): new = self._get_checked_instance(LogNormal, _instance) return super().expand(batch_shape, _instance = new) @property def loc(self) -> Tensor: return self.base_dist.loc @property def scale(self) -> Tensor: return self.base_dist.scale @property def mean(self) -> Tensor: return (self.loc + self.scale.pow(2) / 2).exp() @property def mode(self) -> Tensor: return (self.loc - self.scale.square()).exp() 
@property def variance(self) -> Tensor: scale_sq = self.scale.pow(2) return scale_sq.expm1() * (2 * self.loc + scale_sq).exp() def entropy(self): return self.base_dist.entropy() + self.loc", - "docstring": "Creates a log-normal distribution parameterized by :attr: and :attr: where:: X ~ Normal(loc, scale) Y = exp(X) ~ LogNormal(loc, scale) Example:: >>> # xdoctest: +IGNORE_WANT(\"non-deterministic\") >>> m = LogNormal(torch.tensor([0.0]), torch.tensor([1.0])) >>> m.sample() # log-normal distributed with mean=0 and stddev=1 tensor([ 0.1046]) Args: loc (float or Tensor): mean of log of distribution scale (float or Tensor): standard deviation of log of the distribution", - "type": "class", - "file_path": "pytorch\\torch\\distributions\\log_normal.py", - "ast_data": "ClassDef name:LogNormal Assign Assign Assign FunctionDef name:__init__ arguments arg:self arg:loc type:Union[Tensor, float] arg:scale type:Union[Tensor, float] arg:validate_args type:Optional[bool] Assign Call call:Normal FunctionDef name:expand arguments arg:self arg:batch_shape arg:_instance Assign Call call:_get_checked_instance Return return:yes FunctionDef name:loc arguments arg:self Return return:yes FunctionDef name:scale arguments arg:self Return return:yes FunctionDef name:mean arguments arg:self Return return:yes FunctionDef name:mode arguments arg:self Return return:yes FunctionDef name:variance arguments arg:self Assign Call call:pow Return return:yes FunctionDef name:entropy arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_points", - "source_code": "def set_points(self, points): if np.any(self._points ! = points): self._points = points self.invalidate()", - "docstring": "Set the points of the bounding box directly from an array of the form ``. No error checking is performed, as this method is mainly for internal use.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:set_points arguments arg:self arg:points If Call call:any Assign" - }, - { - "library": "tensorflow", - "name": "read_execution_stack_trace", - "source_code": "def read_execution_stack_trace(self, execution): host_name = self._stack_frame_by_id[execution.stack_frame_ids[0]][0] return (host_name, [self._stack_frame_by_id[frame_id][1:] for frame_id in execution.stack_frame_ids])", - "docstring": "Read the stack trace of a given Execution object. Args: execution: The Execution object of interest. Returns: 1. The host name. 2. The stack trace, as a list of (file_path, lineno, func) tuples.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", - "ast_data": "FunctionDef name:read_execution_stack_trace arguments arg:self arg:execution Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "is_channels_last_contiguous", - "source_code": "def is_channels_last_contiguous(a: Tensor) -> bool: return is_channels_last_contiguous_2d(a) or is_channels_last_contiguous_3d(a)", - "docstring": "True when a tensor is channels-last contiguous. 
This requires that: - the tensor is conceptually either 4 (NHWC) or 5 (NDHWC) dimensions - if we name the tensor's dimensions NCHW or NCDHW, then the strides are such that the stride of the 'C' dimension (Cs) is 1 and the strides corresponding to each dimension (Xs) can be ordered Cs <= Ws <= Hs <= (Ds) <= Ns and are \"nested\" -- so Ws = Cs * Cl, where Cl is the length of the 'C' dimension, for example.", - "type": "function", - "file_path": "pytorch\\torch\\_prims_common\\__init__.py", - "ast_data": "FunctionDef name:is_channels_last_contiguous arguments arg:a type:Tensor Return return:yes" - }, - { - "library": "mongo", - "name": "ConnectionCheckOutStartedEvent", - "source_code": "class ConnectionCheckOutStartedEvent(_ConnectionEvent): __slots__ = ()", - "docstring": "Published when the driver starts attempting to check out a connection. :param address: The address (host, port) pair of the server this Connection is attempting to connect to. .. versionadded:: 3.9", - "type": "class", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "ClassDef name:ConnectionCheckOutStartedEvent Assign" - }, - { - "library": "pytorch", - "name": "ContextlibContextManagerLocalGeneratorObjectVariable", - "source_code": "class ContextlibContextManagerLocalGeneratorObjectVariable(LocalGeneratorObjectVariable): pass", - "docstring": ".. note:: This is only used when the function is annotated with @contextlib.contextmanager It is a special case of a generator function as we do not allow return a context manager from a torch.compile function.", - "type": "class", - "file_path": "pytorch\\torch\\_dynamo\\variables\\functions.py", - "ast_data": "ClassDef name:ContextlibContextManagerLocalGeneratorObjectVariable" - }, - { - "library": "tensorflow", - "name": "swap_memory", - "source_code": "@property def swap_memory(self): return self._swap_memory", - "docstring": "True iff GPU-CPU memory swap is enabled for this while loop.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_ops.py", - "ast_data": "FunctionDef name:swap_memory arguments arg:self Return return:yes" - }, - { - "library": "scrapy", - "name": "dropped", - "source_code": "def dropped(self, item: Any, exception: BaseException, response: Response | None, spider: Spider) -> LogFormatterResult: if (level: = getattr(exception, 'log_level', None)) is None: level = spider.crawler.settings['DEFAULT_DROPITEM_LOG_LEVEL'] if isinstance(level, str): level = getattr(logging, level) return {'level': level, 'msg': DROPPEDMSG, 'args': {'exception': exception, 'item': item}}", - "docstring": "Logs a message when an item is dropped while it is passing through the item pipeline.", - "type": "method", - "file_path": "scrapy\\scrapy\\logformatter.py", - "ast_data": "FunctionDef name:dropped arguments arg:self arg:item type:Any arg:exception type:BaseException arg:response type:Response | None arg:spider type:Spider If Compare op:Is Assign If Call call:isinstance Assign Call call:getattr Return return:yes" - }, - { - "library": "tensorflow", - "name": "FailedPreconditionError", - "source_code": "@tf_export('errors.FailedPreconditionError') class FailedPreconditionError(OpError): def __init__(self, node_def, op, message, *args): super(FailedPreconditionError, self).__init__(node_def, op, message, FAILED_PRECONDITION, *args)", - "docstring": "Raised when some prerequisites are not met when running an operation. 
This typically indicates that system is not in state to execute the operation and requires preconditions to be met before successfully executing current operation. For example, this exception is commonly raised when running an operation that reads a before it has been initialized.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\framework\\errors_impl.py", - "ast_data": "ClassDef name:FailedPreconditionError Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:node_def arg:op arg:message vararg:args" - }, - { - "library": "pytorch", - "name": "mgpu_tune_gemm_in_file", - "source_code": "def mgpu_tune_gemm_in_file(filename_pattern: str, num_gpus: int) -> None: unique_gemm_entries = _gather_unique_untuned_gemm_from_files(filename_pattern) total_gpus = torch.cuda.device_count() assert 1 < = num_gpus < = total_gpus mp_context = mp.get_context('spawn') futures = [] flush_results = [] h = 0 with concurrent.futures.ProcessPoolExecutor(max_workers = num_gpus, mp_context = mp_context, initializer = _check_tuning_assertions) as executor: for line in unique_gemm_entries: future = executor.submit(_process_single_offline_gemm, line, h) futures.append(future) h = (h + 1) % num_gpus for future in concurrent.futures.as_completed(futures): future.result() for g in range(num_gpus): flush_result = executor.submit(write_file) flush_results.append(flush_result) for flush_result in concurrent.futures.as_completed(flush_results): flush_result.result() torch.cuda.synchronize() _gather_tunableop_results()", - "docstring": "Process one or more files and distribute work over one or more GPUs.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\tunable.py", - "ast_data": "FunctionDef name:mgpu_tune_gemm_in_file arguments arg:filename_pattern type:str arg:num_gpus type:int Assign Call call:_gather_unique_untuned_gemm_from_files Assign Call call:device_count Assign Call call:get_context Assign Assign Assign With For Assign Call call:submit Assign For Call call:as_completed For Call call:range Assign Call call:submit For Call call:as_completed" - }, - { - "library": "pytorch", - "name": "format_frame", - "source_code": "def format_frame(frame, *, base = None, line = False): extra_line = '' if line: extra_line = f'{frame.line} # ' return f'{extra_line}{shorten_filename(frame.filename, base = base)}: {frame.lineno} in {frame.name}'", - "docstring": "Format a FrameSummary in a short way, without printing full absolute path or code. The idea is the result fits on a single line.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\_traceback.py", - "ast_data": "FunctionDef name:format_frame arguments arg:frame Assign If Assign Return return:yes" - }, - { - "library": "django", - "name": "mark_safe", - "source_code": "@keep_lazy(SafeString) def mark_safe(s): if hasattr(s, '__html__'): return s if callable(s): return _safety_decorator(mark_safe, s) return SafeString(s)", - "docstring": "Explicitly mark a string as safe for (HTML) output purposes. The returned object can be used everywhere a string is appropriate. If used on a method as a decorator, mark the returned data as safe. 
Can be called multiple times on a single string.", - "type": "function", - "file_path": "django\\django\\utils\\safestring.py", - "ast_data": "FunctionDef name:mark_safe arguments arg:s Call call:keep_lazy If Call call:hasattr Return return:yes If Call call:callable Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "node_op_type", - "source_code": "def node_op_type(self, node_name, device_name = None): if not self._debug_graphs: raise LookupError('Node op types are not loaded from partition graphs yet.') device_name = self._infer_device_name(device_name, node_name) return self._debug_graphs[device_name].node_op_types[node_name]", - "docstring": "Get the op type of given node. Args: node_name: () name of the node. device_name: () name of the device. If there is only one device or if node_name exists on only one device, this argument is optional. Returns: () op type of the node. Raises: LookupError: If node op types have not been loaded from partition graphs yet.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_data.py", - "ast_data": "FunctionDef name:node_op_type arguments arg:self arg:node_name arg:device_name If Raise raises:LookupError('Node op types are not loaded from partition graphs yet.') Assign Call call:_infer_device_name Return return:yes" - }, - { - "library": "scikit-learn", - "name": "asarray", - "source_code": "def asarray(obj: Array | bool | int | float | complex | NestedSequence[bool | int | float | complex] | SupportsBufferProtocol, /, *, dtype: Optional[DType] = None, device: Optional[Device] = None, copy: Optional[bool] = None, **kwargs) -> Array: with cp.cuda.Device(device): if copy is None: return cp.asarray(obj, dtype = dtype, **kwargs) else: res = cp.array(obj, dtype = dtype, copy = copy, **kwargs) if not copy and res is not obj: raise ValueError('Unable to avoid copy while creating an array as requested') return res", - "docstring": "Array API compatibility wrapper for asarray(). See the corresponding documentation in the array library and/or the array API specification for more details.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\cupy\\_aliases.py", - "ast_data": "FunctionDef name:asarray arguments kwarg:kwargs With If Compare op:Is Return return:yes Assign Call call:array If BoolOp Compare op:IsNot Raise raises:ValueError('Unable to avoid copy while creating an array as requested') Return return:yes" - }, - { - "library": "mongo", - "name": "__getitem__", - "source_code": "def __getitem__(self, item: OCSPRequest) -> OCSPResponse: with self._lock: cache_key = self._get_cache_key(item) value = self._data[cache_key] this_update = _this_update(value) next_update = _next_update(value) assert this_update is not None assert next_update is not None now = _datetime.now(tz = timezone.utc) if this_update.tzinfo is None: now = now.replace(tzinfo = None) if this_update < = now < next_update: return value self._data.pop(cache_key, None) raise KeyError(cache_key)", - "docstring": "Get a cache entry if it exists. 
'item' is of type cryptography.x509.ocsp.OCSPRequest Raises KeyError if the item is not in the cache.", - "type": "method", - "file_path": "mongo\\pymongo\\ocsp_cache.py", - "ast_data": "FunctionDef name:__getitem__ arguments arg:self arg:item type:OCSPRequest With Assign Call call:_get_cache_key Assign Assign Call call:_this_update Assign Call call:_next_update Assign Call call:now If Compare op:Is Assign Call call:replace If Compare op:LtE op:Lt Return return:yes Raise raises:KeyError(cache_key)" - }, - { - "library": "pandas", - "name": "from_range", - "source_code": "@classmethod def from_range(cls, data: range, name = None, dtype: Dtype | None = None) -> Self: if not isinstance(data, range): raise TypeError(f'{cls.__name__}(...) must be called with object coercible to a range, {data!r} was passed') cls._validate_dtype(dtype) return cls._simple_new(data, name = name)", - "docstring": "Create :class: from a `pandas.RangeIndexRangeIndex` object. It is particularly useful for constructing indices in an efficient and memory-friendly manner. Parameters ---------- data : range The range object to be converted into a RangeIndex. name : str, default None Name to be stored in the index. dtype : Dtype or None Data type for the RangeIndex. If None, the default integer type will be used. Returns ------- RangeIndex See Also -------- RangeIndex : Immutable Index implementing a monotonic integer range. Index : Immutable sequence used for indexing and alignment. Examples -------- >>> pd.RangeIndex.from_range(range(5)) RangeIndex(start=0, stop=5, step=1) >>> pd.RangeIndex.from_range(range(2, -10, -3)) RangeIndex(start=2, stop=-10, step=-3)", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\range.py", - "ast_data": "FunctionDef name:from_range arguments arg:cls arg:data type:range arg:name arg:dtype type:Dtype | None If Raise raises:TypeError(f'{cls.__name__}(...) must be called with object coercible to a range, {data!r} was passed') Return return:yes" - }, - { - "library": "scikit-learn", - "name": "yield_namespace_device_dtype_combinations", - "source_code": "def yield_namespace_device_dtype_combinations(include_numpy_namespaces = True): for array_namespace in yield_namespaces(include_numpy_namespaces = include_numpy_namespaces): if array_namespace = = 'torch': for device, dtype in itertools.product(('cpu', 'cuda'), ('float64', 'float32')): yield (array_namespace, device, dtype) yield (array_namespace, 'mps', 'float32') elif array_namespace = = 'array_api_strict': try: import array_api_strict yield (array_namespace, array_api_strict.Device('CPU_DEVICE'), 'float64') yield (array_namespace, array_api_strict.Device('device1'), 'float32') except ImportError: yield (array_namespace, 'CPU_DEVICE', 'float64') yield (array_namespace, 'device1', 'float32') else: yield (array_namespace, None, None)", - "docstring": "Yield supported namespace, device, dtype tuples for testing. Use this to test that an estimator works with all combinations. Use in conjunction with to give clearer pytest parametrization ID names. Parameters ---------- include_numpy_namespaces : bool, default=True If True, also yield numpy namespaces. Returns ------- array_namespace : str The name of the Array API namespace. device : str The name of the device on which to allocate the arrays. Can be None to indicate that the default value should be used. dtype_name : str The name of the data type to use for arrays. 
Can be None to indicate that the default value should be used.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", - "ast_data": "FunctionDef name:yield_namespace_device_dtype_combinations arguments arg:include_numpy_namespaces For Call call:yield_namespaces If Compare op:Eq For Call call:product If Compare op:Eq Try ExceptHandler" - }, - { - "library": "matplotlib", - "name": "classproperty", - "source_code": "class classproperty: def __init__(self, fget, fset = None, fdel = None, doc = None): self._fget = fget if fset is not None or fdel is not None: raise ValueError('classproperty only implements fget.') self.fset = fset self.fdel = fdel self._doc = doc def __get__(self, instance, owner): return self._fget(owner) @property def fget(self): return self._fget", - "docstring": "Like , but also triggers on access via the class, and it is the *class* that's passed as argument. Examples -------- :: class C: @classproperty def foo(cls): return cls.__name__ assert C.foo == \"C\"", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py", - "ast_data": "ClassDef name:classproperty FunctionDef name:__init__ arguments arg:self arg:fget arg:fset arg:fdel arg:doc Assign If BoolOp Compare op:IsNot Compare op:IsNot Raise raises:ValueError('classproperty only implements fget.') Assign Assign Assign FunctionDef name:__get__ arguments arg:self arg:instance arg:owner Return return:yes FunctionDef name:fget arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "savefig", - "source_code": "def savefig(self, figure = None, **kwargs): if not isinstance(figure, Figure): if figure is None: manager = Gcf.get_active() else: manager = Gcf.get_fig_manager(figure) if manager is None: raise ValueError(f'No figure {figure}') figure = manager.canvas.figure width, height = figure.get_size_inches() if self._n_figures = = 0: self._write_header(width, height) else: self._file.write(b'\\\\newpage\\\\ifdefined\\\\pdfpagewidth\\\\pdfpagewidth\\\\else\\\\pagewidth\\\\fi = %fin\\\\ifdefined\\\\pdfpageheight\\\\pdfpageheight\\\\else\\\\pageheight\\\\fi = %fin%%\\n' % (width, height)) figure.savefig(self._file, format = 'pgf', backend = 'pgf', **kwargs) self._n_figures + = 1", - "docstring": "Save a to this file as a new page. Any other keyword arguments are passed to . Parameters ---------- figure : or int, default: the active figure The figure, or index of the figure, that is saved to the file.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pgf.py", - "ast_data": "FunctionDef name:savefig arguments arg:self arg:figure kwarg:kwargs If If Compare op:Is Assign Call call:get_active Assign Call call:get_fig_manager If Compare op:Is Raise raises:ValueError(f'No figure {figure}') Assign Assign Call call:get_size_inches If Compare op:Eq" - }, - { - "library": "numpy", - "name": "chebpts2", - "source_code": "def chebpts2(npts): _npts = int(npts) if _npts ! = npts: raise ValueError('npts must be integer') if _npts < 2: raise ValueError('npts must be > = 2') x = np.linspace(-np.pi, 0, _npts) return np.cos(x)", - "docstring": "Chebyshev points of the second kind. The Chebyshev points of the second kind are the points `` sorted in ascending order. Parameters ---------- npts : int Number of sample points desired. 
Returns ------- pts : ndarray The Chebyshev points of the second kind.", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\chebyshev.py", - "ast_data": "FunctionDef name:chebpts2 arguments arg:npts Assign Call call:int If Compare op:NotEq Raise raises:ValueError('npts must be integer') If Compare op:Lt Raise raises:ValueError('npts must be >= 2') Assign Call call:linspace Return return:yes" - }, - { - "library": "flexx", - "name": "modules", - "source_code": "@property def modules(self): return self._modules", - "docstring": "The JSModule objects known to the asset store. Each module corresponds to a Python module.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_assetstore.py", - "ast_data": "FunctionDef name:modules arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "content_metadata", - "source_code": "def content_metadata(self) -> dict[str, Any]: writing_mode = self.config.epub_writing_mode if (source_date_epoch: = os.getenv('SOURCE_DATE_EPOCH')) is not None: time_tuple = time.gmtime(int(source_date_epoch)) else: time_tuple = time.gmtime() metadata = super().content_metadata() metadata['description'] = html.escape(self.config.epub_description) metadata['contributor'] = html.escape(self.config.epub_contributor) metadata['page_progression_direction'] = PAGE_PROGRESSION_DIRECTIONS.get(writing_mode) metadata['ibook_scroll_axis'] = IBOOK_SCROLL_AXIS.get(writing_mode) metadata['date'] = html.escape(time.strftime('%Y-%m-%dT%H: %M: %SZ', time_tuple)) metadata['version'] = html.escape(self.config.version) metadata['epub_version'] = self.config.epub_version return metadata", - "docstring": "Create a dictionary with all metadata for the content.opf file properly escaped.", - "type": "method", - "file_path": "sphinx\\sphinx\\builders\\epub3.py", - "ast_data": "FunctionDef name:content_metadata arguments arg:self Assign If Compare op:IsNot Assign Call call:gmtime Assign Call call:gmtime Assign Call call:content_metadata Assign Call call:escape Assign Call call:escape Assign Call call:get Assign Call call:get Assign Call call:escape Assign Call call:escape Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "tensor_summary", - "source_code": "@tf_export(v1 = ['summary.tensor_summary']) def tensor_summary(name, tensor, summary_description = None, collections = None, summary_metadata = None, family = None, display_name = None): if summary_metadata is None: summary_metadata = _SummaryMetadata() if summary_description is not None: summary_metadata.summary_description = summary_description if display_name is not None: summary_metadata.display_name = display_name serialized_summary_metadata = summary_metadata.SerializeToString() if _distribute_summary_op_util.skip_summary(): return _constant_op.constant('') with _summary_op_util.summary_scope(name, family, values = [tensor]) as (tag, scope): val = _gen_logging_ops.tensor_summary_v2(tensor = tensor, tag = tag, name = scope, serialized_summary_metadata = serialized_summary_metadata) _summary_op_util.collect(val, collections, [_ops.GraphKeys.SUMMARIES]) return val", - "docstring": "Outputs a protocol buffer with a serialized tensor.proto. Args: name: A name for the generated node. If display_name is not set, it will also serve as the tag name in TensorBoard. (In that case, the tag name will inherit tf name scopes.) tensor: A tensor of any type and shape to serialize. summary_description: A long description of the summary sequence. Markdown is supported. 
collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to . summary_metadata: Optional SummaryMetadata proto (which describes which plugins may use the summary value). family: Optional; if provided, used as the prefix of the summary tag, which controls the name used for display on TensorBoard when display_name is not set. display_name: A string used to name this data in TensorBoard. If this is not set, then the node name will be used instead. Returns: A scalar of type . The serialized protocol buffer.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\summary\\summary.py", - "ast_data": "FunctionDef name:tensor_summary arguments arg:name arg:tensor arg:summary_description arg:collections arg:summary_metadata arg:family arg:display_name Call call:tf_export If Compare op:Is Assign Call call:_SummaryMetadata If Compare op:IsNot Assign If Compare op:IsNot Assign Assign Call call:SerializeToString If Call call:skip_summary Return return:yes With Assign Call call:tensor_summary_v2 Return return:yes" - }, - { - "library": "scipy", - "name": "tukeylambda_kurtosis", - "source_code": "def tukeylambda_kurtosis(lam): lam = np.asarray(lam) shp = lam.shape lam = np.atleast_1d(lam).astype(np.float64) threshold = 0.055 low_mask = lam < -0.25 negqrtr_mask = lam = = -0.25 small_mask = np.abs(lam) < threshold reg_mask = ~(low_mask | negqrtr_mask | small_mask) small = lam[small_mask] reg = lam[reg_mask] k = np.empty_like(lam) k[low_mask] = np.nan k[negqrtr_mask] = np.inf if small.size > 0: k[small_mask] = _tukeylambda_kurt_p(small) / _tukeylambda_kurt_q(small) if reg.size > 0: numer = 1.0 / (4 * reg + 1) - 4 * beta(3 * reg + 1, reg + 1) + 3 * beta(2 * reg + 1, 2 * reg + 1) denom = 2 * (1.0 / (2 * reg + 1) - beta(reg + 1, reg + 1)) ** 2 k[reg_mask] = numer / denom - 3 k.shape = shp return k", - "docstring": "Kurtosis of the Tukey Lambda distribution. Parameters ---------- lam : array_like The lambda values at which to compute the variance. Returns ------- v : ndarray The variance. For lam < -0.25, the variance is not defined, so np.nan is returned. For lam = 0.25, np.inf is returned.", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_tukeylambda_stats.py", - "ast_data": "FunctionDef name:tukeylambda_kurtosis arguments arg:lam Assign Call call:asarray Assign Assign Call call:astype Assign Assign Compare op:Lt Assign Compare op:Eq Assign Compare op:Lt Assign Assign Assign Assign Call call:empty_like Assign Assign If Compare op:Gt Assign If Compare op:Gt Assign Assign Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "register_backend", - "source_code": "@classmethod def register_backend(cls, name, func, extended_api = False, devices: Optional[Union[str, list[str]]] = None) -> None: if not hasattr(Backend, name.upper()): setattr(Backend, name.upper(), name.lower()) if name.lower() not in Backend.backend_list: Backend.backend_list.append(name.lower()) if devices is not None: for device in devices: if device ! = 'cpu' and device ! = 'cuda': Backend.default_device_backend_map[device] = name.lower() Backend.backend_type_map[name.lower()] = ProcessGroup.BackendType.CUSTOM if devices is None: warnings.warn(f'Device capability of {name} unspecified, assuming `cpu` and `cuda`. 
Please specify it via the `devices` argument of `register_backend`.') Backend.backend_capability[name.lower()] = ['cpu', 'cuda'] elif isinstance(devices, str): Backend.backend_capability[name.lower()] = [devices] else: Backend.backend_capability[name.lower()] = devices Backend._plugins[name.upper()] = Backend._BackendPlugin(func, extended_api)", - "docstring": "Register a new backend with the given name and instantiating function. This class method is used by 3rd party `None`, assuming both \"cpu\" and \"cuda\" .. note:: This support of 3rd party backend is experimental and subject to change.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:register_backend arguments arg:cls arg:name arg:func arg:extended_api arg:devices type:Optional[Union[str, list[str]]] If If Compare op:NotIn If Compare op:IsNot For If BoolOp Compare op:NotEq Compare op:NotEq Assign Call call:lower Assign If Compare op:Is Assign If Call call:isinstance Assign Assign Assign Call call:_BackendPlugin" - }, - { - "library": "sphinx", - "name": "resolve_reference_in_inventory", - "source_code": "def resolve_reference_in_inventory(env: BuildEnvironment, inv_name: InventoryName, node: pending_xref, contnode: TextElement) -> nodes.reference | None: assert inventory_exists(env, inv_name) return _resolve_reference(inv_name, env.domains, InventoryAdapter(env).named_inventory[inv_name], False, frozenset(env.config.intersphinx_disabled_reftypes), node, contnode)", - "docstring": "Attempt to resolve a missing reference via intersphinx references. Resolution is tried in the given inventory with the target as is. Requires ``.", - "type": "function", - "file_path": "sphinx\\sphinx\\ext\\intersphinx\\_resolve.py", - "ast_data": "FunctionDef name:resolve_reference_in_inventory arguments arg:env type:BuildEnvironment arg:inv_name type:InventoryName arg:node type:pending_xref arg:contnode type:TextElement Return return:yes" - }, - { - "library": "matplotlib", - "name": "y1", - "source_code": "@property def y1(self): return self.get_points()[1, 1]", - "docstring": "The second of the pair of *y* coordinates that define the bounding box. This is not guaranteed to be greater than :attr: (for that, use :attr:).", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:y1 arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, sess, coord, stop_grace_period_secs = 120): _WrappedSession.__init__(self, sess) self._coord = coord self._stop_grace_period_secs = stop_grace_period_secs", - "docstring": "Create a new . Args: sess: A object. The wrapped session. coord: A object. stop_grace_period_secs: Number of seconds given to threads to stop after has been called.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:sess arg:coord arg:stop_grace_period_secs Assign Assign" - }, - { - "library": "kornia", - "name": "KORNIA_CHECK_SAME_SHAPE", - "source_code": "def KORNIA_CHECK_SAME_SHAPE(x: Tensor, y: Tensor, raises: bool = True) -> bool: if x.shape ! = y.shape: if raises: raise TypeError(f'Not same shape for tensors. Got: {x.shape} and {y.shape}') return False return True", - "docstring": "Check whether two tensor have the same shape. Args: x: first tensor to evaluate. y: sencod tensor to evaluate. msg: message to show in the exception. 
raises: bool indicating whether an exception should be raised upon failure. Raises: TypeException: if the two tensors have not the same shape and raises is True. Example: >>> x1 = torch.rand(2, 3, 3) >>> x2 = torch.rand(2, 3, 3) >>> KORNIA_CHECK_SAME_SHAPE(x1, x2) True", - "type": "function", - "file_path": "kornia\\kornia\\core\\check.py", - "ast_data": "FunctionDef name:KORNIA_CHECK_SAME_SHAPE arguments arg:x type:Tensor arg:y type:Tensor arg:raises type:bool If Compare op:NotEq If Raise raises:TypeError(f'Not same shape for tensors. Got: {x.shape} and {y.shape}') Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "document", - "source_code": "@property def document(self) -> dict[str, Any]: return self.__document.copy()", - "docstring": "The document representation of this collation. .. note:: :class: is immutable. Mutating the value of :attr: does not mutate this :class:.", - "type": "method", - "file_path": "mongo\\pymongo\\collation.py", - "ast_data": "FunctionDef name:document arguments arg:self Return return:yes" - }, - { - "library": "coconut", - "name": "final", - "source_code": "def final(item): return add_action(trace(item), final_evaluate_tokens)", - "docstring": "Collapse the computation graph upon parsing the given item.", - "type": "function", - "file_path": "coconut\\coconut\\compiler\\util.py", - "ast_data": "FunctionDef name:final arguments arg:item Return return:yes" - }, - { - "library": "django", - "name": "get_models", - "source_code": "def get_models(self, include_auto_created = False, include_swapped = False): self.apps.check_models_ready() for model in self.models.values(): if model._meta.auto_created and (not include_auto_created): continue if model._meta.swapped and (not include_swapped): continue yield model", - "docstring": "Return an iterable of models. By default, the following models aren't included: - auto-created models for many-to-many relations without an explicit intermediate table, - models that have been swapped out. Set the corresponding keyword argument to True to include such models. 
Keyword arguments aren't documented; they're a private API.", - "type": "method", - "file_path": "django\\django\\apps\\config.py", - "ast_data": "FunctionDef name:get_models arguments arg:self arg:include_auto_created arg:include_swapped For Call call:values If BoolOp If BoolOp" - }, - { - "library": "pytorch", - "name": "make_image", - "source_code": "def make_image(tensor, rescale = 1, rois = None, labels = None): from PIL import Image height, width, channel = tensor.shape scaled_height = int(height * rescale) scaled_width = int(width * rescale) image = Image.fromarray(tensor) if rois is not None: image = draw_boxes(image, rois, labels = labels) ANTIALIAS = Image.Resampling.LANCZOS image = image.resize((scaled_width, scaled_height), ANTIALIAS) import io output = io.BytesIO() image.save(output, format = 'PNG') image_string = output.getvalue() output.close() return Summary.Image(height = height, width = width, colorspace = channel, encoded_image_string = image_string)", - "docstring": "Convert a numpy representation of an image to Image protobuf.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py", - "ast_data": "FunctionDef name:make_image arguments arg:tensor arg:rescale arg:rois arg:labels Assign Assign Call call:int Assign Call call:int Assign Call call:fromarray If Compare op:IsNot Assign Call call:draw_boxes Assign Assign Call call:resize Assign Call call:BytesIO Assign Call call:getvalue Return return:yes" - }, - { - "library": "pytorch", - "name": "gen_all_reshape_possibilities", - "source_code": "def gen_all_reshape_possibilities(list_of_dims, target): all_possibilities = generate_all_int_dyn_dim_possibilities(list_of_dims) all_constraints = [] for p in all_possibilities: to_multiply = [] p = list(p) for constraint in p: assert isinstance(constraint, BinConstraintD) if constraint.op = = op_neq: to_multiply.append(constraint.lhs) if not to_multiply: all_constraints.append(Conj(p)) elif len(to_multiply) < len(list_of_dims): all_constraints.append(Conj(p + [is_target_div_by_dim(target, Prod(to_multiply))])) else: all_constraints.append(Conj(p + [BinConstraintD(Prod(list_of_dims), Prod(target), op_eq)])) return Disj(all_constraints)", - "docstring": "Consider all possibilities what the input dimensions could be (number or dynamic) Then generate the appropriate constraints using multiplication or mod depending on the possibility The possibilities we consider here are the cross product of being equal to dyn or not equal to dyn for the input. Target is fixed because at most one dimension could be dyn. We have different cases for this. 
Args: list_of_dims: The input list of dimensions target: The tensor we want to reshape to Returns: A disjunction of transformed reshape constraints", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_transformation.py", - "ast_data": "FunctionDef name:gen_all_reshape_possibilities arguments arg:list_of_dims arg:target Assign Call call:generate_all_int_dyn_dim_possibilities Assign For Assign Assign Call call:list For If Compare op:Eq If If Compare op:Lt Return return:yes" - }, - { - "library": "cherrypy", - "name": "autovary", - "source_code": "def autovary(ignore = None, debug = False): request = cherrypy.serving.request req_h = request.headers request.headers = MonitoredHeaderMap() request.headers.update(req_h) if ignore is None: ignore = set(['Content-Disposition', 'Content-Length', 'Content-Type']) def set_response_header(): resp_h = cherrypy.serving.response.headers v = set([e.value for e in resp_h.elements('Vary')]) if debug: cherrypy.log('Accessed headers: %s' % request.headers.accessed_headers, 'TOOLS.AUTOVARY') v = v.union(request.headers.accessed_headers) v = v.difference(ignore) v = list(v) v.sort() resp_h['Vary'] = ', '.join(v) request.hooks.attach('before_finalize', set_response_header, 95)", - "docstring": "Populate `` access.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", - "ast_data": "FunctionDef name:autovary arguments arg:ignore arg:debug Assign Assign Assign Call call:MonitoredHeaderMap If Compare op:Is Assign Call call:set FunctionDef name:set_response_header arguments Assign Assign Call call:set If Assign Call call:union Assign Call call:difference Assign Call call:list Assign Call call:join" - }, - { - "library": "salmon", - "name": "render_pep440_branch", - "source_code": "def render_pep440_branch(pieces: Dict[str, Any]) -> str: if pieces['closest-tag']: rendered = pieces['closest-tag'] if pieces['distance'] or pieces['dirty']: if pieces['branch'] ! = 'master': rendered + = '.dev0' rendered + = plus_or_dot(pieces) rendered + = '%d.g%s' % (pieces['distance'], pieces['short']) if pieces['dirty']: rendered + = '.dirty' else: rendered = '0' if pieces['branch'] ! = 'master': rendered + = '.dev0' rendered + = '+untagged.%d.g%s' % (pieces['distance'], pieces['short']) if pieces['dirty']: rendered + = '.dirty' return rendered", - "docstring": "TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . The \".dev0\" means not master branch. Note that .dev0 sorts backwards (a feature branch will appear \"older\" than the master branch). Exceptions: 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty]", - "type": "function", - "file_path": "salmon\\salmon\\_version.py", - "ast_data": "FunctionDef name:render_pep440_branch arguments arg:pieces type:Dict[str, Any] If Assign If BoolOp If Compare op:NotEq If Assign If Compare op:NotEq If Return return:yes" - }, - { - "library": "pytorch", - "name": "add_push_null_call_function_ex", - "source_code": "def add_push_null_call_function_ex(inst_or_insts: Union[Instruction, list[Instruction]]) -> list[Instruction]: if isinstance(inst_or_insts, Instruction): insts = [inst_or_insts] else: insts = inst_or_insts if sys.version_info < (3, 11): return insts idx = -1 if sys.version_info > = (3, 13) else 0 if insts[idx].opname = = 'LOAD_GLOBAL': assert insts[idx].arg is not None if insts[idx].arg & 1 = = 0: insts[idx].arg | = 1 return insts if sys.version_info > = (3, 13): insts = insts + [create_instruction('PUSH_NULL')] else: insts = [create_instruction('PUSH_NULL')] + insts return insts", - "docstring": "Like add_push_null, but the low bit of LOAD_ATTR/LOAD_SUPER_ATTR is not set, due to an expected CALL_FUNCTION_EX instruction.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\bytecode_transformation.py", - "ast_data": "FunctionDef name:add_push_null_call_function_ex arguments arg:inst_or_insts type:Union[Instruction, list[Instruction]] If Call call:isinstance Assign Assign If Compare op:Lt Return return:yes Assign If Compare op:Eq If Compare op:Eq Return return:yes If Compare op:GtE Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "streaming_restore", - "source_code": "@tf_export('__internal__.tracking.streaming_restore', v1 = []) def streaming_restore(status, session = None): if context.executing_eagerly(): return if session is None: session = get_session() if isinstance(status, NameBasedSaverStatus): raise NotImplementedError('Streaming restore not supported from name-based checkpoints when graph building. File a feature request if this limitation bothers you. As a workaround, consider either using tf.train.Checkpoint to load name-based checkpoints or enabling eager execution.') status.run_restore_ops(session = session) status._checkpoint.new_restore_ops_callback = lambda ops: session.run(ops, feed_dict = status._feed_dict)", - "docstring": "When graph building, runs restore ops as soon as they come in. Args: status: A _LoadStatus objects from an object-based saver's restore(). Streaming restore from name-based checkpoints is not currently supported. session: A session to run new restore ops in.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\checkpoint\\checkpoint.py", - "ast_data": "FunctionDef name:streaming_restore arguments arg:status arg:session Call call:tf_export If Call call:executing_eagerly Return return:no If Compare op:Is Assign Call call:get_session If Call call:isinstance Raise raises:NotImplementedError('Streaming restore not supported from name-based checkpoints when graph building. File a feature request if this limitation bothers you. 
As a workaround, consider either using tf.train.Checkpoint to load name-based checkpoints or enabling eager execution.') Assign" - }, - { - "library": "scikit-learn", - "name": "inverse_transform", - "source_code": "def inverse_transform(self, X): xp, _ = get_namespace(X) if self.whiten: scaled_components = xp.sqrt(self.explained_variance_[:, np.newaxis]) * self.components_ return X @ scaled_components + self.mean_ else: return X @ self.components_ + self.mean_", - "docstring": "Transform data back to its original space. In other words, return an input whose transform would be X. Parameters ---------- X : array-like of shape (n_samples, n_components) New data, where is the number of samples and is the number of components. Returns ------- X_original : array-like of shape (n_samples, n_features) Original data, where is the number of samples and is the number of features. Notes ----- If whitening is enabled, inverse_transform will compute the exact inverse operation, which includes reversing whitening.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_base.py", - "ast_data": "FunctionDef name:inverse_transform arguments arg:self arg:X Assign Call call:get_namespace If Assign Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "exceptions_raised", - "source_code": "@property def exceptions_raised(self): return self._exceptions_raised", - "docstring": "Exceptions raised but not handled by the threads. Exceptions raised in queue runner threads are handled in one of two ways depending on whether or not a was passed to : * With a , exceptions are reported to the coordinator and forgotten by the . * Without a , exceptions are captured by the and made available in this property. Returns: A list of Python objects. The list is empty if no exception was captured. 
(No exceptions are captured when using a Coordinator.)", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\queue_runner_impl.py", - "ast_data": "FunctionDef name:exceptions_raised arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "check_filterable", - "source_code": "def check_filterable(self, expression): if hasattr(expression, 'resolve_expression') and (not getattr(expression, 'filterable', True)): raise NotSupportedError(expression.__class__.__name__ + ' is disallowed in the filter clause.') if hasattr(expression, 'get_source_expressions'): for expr in expression.get_source_expressions(): self.check_filterable(expr)", - "docstring": "Raise an error if expression cannot be used in a WHERE clause.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\query.py", - "ast_data": "FunctionDef name:check_filterable arguments arg:self arg:expression If BoolOp Call call:hasattr Raise raises:NotSupportedError(expression.__class__.__name__ + ' is disallowed in the filter clause.') If Call call:hasattr For Call call:get_source_expressions" - }, - { - "library": "pytorch", - "name": "get_overlapping_candidate", - "source_code": "def get_overlapping_candidate(): candidates = [x for x in ready if not contains_collective(x.snode) and (not contains_wait(x.snode))] if len(candidates) = = 0: return None return min(candidates, key = lambda x: x.score)", - "docstring": "Return the next node in the ready queue that's neither a collective or a wait.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\comms.py", - "ast_data": "FunctionDef name:get_overlapping_candidate arguments Assign If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "run", - "source_code": "def run(self, fetches, feed_dict = None, options = None, run_metadata = None): raise NotImplementedError('run')", - "docstring": "Runs operations in the session. See for details.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\client\\session.py", - "ast_data": "FunctionDef name:run arguments arg:self arg:fetches arg:feed_dict arg:options arg:run_metadata Raise raises:NotImplementedError('run')" - }, - { - "library": "tensorflow", - "name": "concat", - "source_code": "@dispatch.dispatch_for_types(array_ops.concat, StructuredTensor) def concat(values, axis, name: str = 'concat'): if name is None: name = 'concat' _assert_concat_compatible_structured_tensors(values) def leaf_op(values): return array_ops.concat(values, axis) axis = array_ops.get_positive_axis(axis, values[0].rank) with ops.name_scope(name, 'StructuredConcat', values): return _extend_op(values, leaf_op)", - "docstring": "tf.concat for structured tensors. Does not support (yet) checks on illegal axis values, et cetera. Args: values: a sequence of StructuredTensors. axis: an axis to concatenate upon. name: the name of the op(s). 
Returns: the params reorganized according to indices.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\structured\\structured_array_ops.py", - "ast_data": "FunctionDef name:concat arguments arg:values arg:axis arg:name type:str Call call:dispatch_for_types If Compare op:Is Assign FunctionDef name:leaf_op arguments arg:values Return return:yes Assign Call call:get_positive_axis With Return return:yes" - }, - { - "library": "cherrypy", - "name": "Host", - "source_code": "class Host(object): ip = '0.0.0.0' port = 80 name = 'unknown.tld' def __init__(self, ip, port, name = None): self.ip = ip self.port = port if name is None: name = ip self.name = name def __repr__(self): return 'httputil.Host(%r, %r, %r)' % (self.ip, self.port, self.name)", - "docstring": "An internet address. name Should be the client's host name. If not available (because no DNS lookup is performed), the IP address should be used instead.", - "type": "class", - "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", - "ast_data": "ClassDef name:Host Assign Assign Assign FunctionDef name:__init__ arguments arg:self arg:ip arg:port arg:name Assign Assign If Compare op:Is Assign Assign FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "fixture_dirs", - "source_code": "@cached_property def fixture_dirs(self): dirs = [] fixture_dirs = settings.FIXTURE_DIRS if len(fixture_dirs) ! = len(set(fixture_dirs)): raise ImproperlyConfigured('settings.FIXTURE_DIRS contains duplicates.') for app_config in apps.get_app_configs(): app_label = app_config.label app_dir = os.path.join(app_config.path, 'fixtures') if app_dir in [str(d) for d in fixture_dirs]: raise ImproperlyConfigured(\"'%s' is a default fixture directory for the '%s' app and cannot be listed in settings.FIXTURE_DIRS.\" % (app_dir, app_label)) if self.app_label and app_label ! = self.app_label: continue if os.path.isdir(app_dir): dirs.append(app_dir) dirs.extend(fixture_dirs) dirs.append('') return [os.path.realpath(d) for d in dirs]", - "docstring": "Return a list of fixture directories. 
The list contains the 'fixtures' subdirectory of each installed application, if it exists, the directories in FIXTURE_DIRS, and the current directory.", - "type": "method", - "file_path": "django\\django\\core\\management\\commands\\loaddata.py", - "ast_data": "FunctionDef name:fixture_dirs arguments arg:self Assign Assign If Compare op:NotEq Raise raises:ImproperlyConfigured('settings.FIXTURE_DIRS contains duplicates.') For Call call:get_app_configs Assign Assign Call call:join If Compare op:In Raise raises:ImproperlyConfigured(\"'%s' is a default fixture directory for the '%s' app and cannot be listed in settings.FIXTURE_DIRS.\" % (app_dir, app_label)) If BoolOp Compare op:NotEq If Call call:isdir Return return:yes" - }, - { - "library": "matplotlib", - "name": "add_positions", - "source_code": "def add_positions(self, position): if position is None or (hasattr(position, 'len') and len(position) = = 0): return positions = self.get_positions() positions = np.hstack([positions, np.asanyarray(position)]) self.set_positions(positions)", - "docstring": "Add one or more events at the specified positions.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "FunctionDef name:add_positions arguments arg:self arg:position If BoolOp Compare op:Is BoolOp Call call:hasattr Compare op:Eq Return return:no Assign Call call:get_positions Assign Call call:hstack" - }, - { - "library": "pandas", - "name": "BaseIndexer", - "source_code": "class BaseIndexer: def __init__(self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs) -> None: self.index_array = index_array self.window_size = window_size for key, value in kwargs.items(): setattr(self, key, value) @Appender(get_window_bounds_doc) def get_window_bounds(self, num_values: int = 0, min_periods: int | None = None, center: bool | None = None, closed: str | None = None, step: int | None = None) -> tuple[np.ndarray, np.ndarray]: raise NotImplementedError", - "docstring": "Base class for window bounds calculations. Parameters ---------- index_array : np.ndarray, default None Array-like structure representing the indices for the data points. If None, the default indices are assumed. This can be useful for handling non-uniform indices in data, such as in time series with irregular timestamps. window_size : int, default 0 Size of the moving window. This is the number of observations used for calculating the statistic. The default is to consider all observations within the window. **kwargs Additional keyword arguments passed to the subclass's methods. See Also -------- DataFrame.rolling : Provides rolling window calculations on dataframe. Series.rolling : Provides rolling window calculations on series. Examples -------- >>> from pandas.api.indexers import BaseIndexer >>> class CustomIndexer(BaseIndexer): ... def get_window_bounds(self, num_values, min_periods, center, closed, step): ... start = np.arange(num_values, dtype=np.int64) ... end = np.arange(num_values, dtype=np.int64) + self.window_size ... 
return start, end >>> df = pd.DataFrame({\"values\": range(5)}) >>> indexer = CustomIndexer(window_size=2) >>> df.rolling(indexer).sum() values 0 1.0 1 3.0 2 5.0 3 7.0 4 4.0", - "type": "class", - "file_path": "pandas\\pandas\\core\\indexers\\objects.py", - "ast_data": "ClassDef name:BaseIndexer FunctionDef name:__init__ arguments arg:self arg:index_array type:np.ndarray | None arg:window_size type:int kwarg:kwargs Assign Assign For Call call:items FunctionDef name:get_window_bounds arguments arg:self arg:num_values type:int arg:min_periods type:int | None arg:center type:bool | None arg:closed type:str | None arg:step type:int | None Call call:Appender Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "unique_name", - "source_code": "def unique_name(self, name, mark_as_used = True) -> str: if self._name_stack: name = self._name_stack + '/' + name name_key = name.lower() i = self._names_in_use.get(name_key, 0) if mark_as_used: self._names_in_use[name_key] = i + 1 if i > 0: base_name_key = name_key while name_key in self._names_in_use: name_key = '%s_%d' % (base_name_key, i) i + = 1 if mark_as_used: self._names_in_use[name_key] = 1 name = '%s_%d' % (name, i - 1) return name", - "docstring": "Return a unique operation name for . Note: You rarely need to call directly. Most of the time you just need to create blocks to generate structured names. is used to generate structured names, separated by , to help identify operations when debugging a graph. Operation names are displayed in error messages reported by the TensorFlow runtime, and in various visualization tools such as TensorBoard. If is set to , which is the default, a new unique name is created and marked as in use. If it's set to , the unique name is returned without actually being marked as used. This is useful when the caller simply wants to know what the name to be created will be. Args: name: The name for an operation. mark_as_used: Whether to mark this name as being used. Returns: A string to be passed to that will be used to name the operation being created.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:unique_name arguments arg:self arg:name arg:mark_as_used If Assign Assign Call call:lower Assign Call call:get If Assign If Compare op:Gt Assign While Compare op:In Assign If Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "If", - "source_code": "def If(cond, inputs, then_branch, else_branch, name = None): if isinstance(then_branch, function._DefinedFunction): tlist = [_.type for _ in then_branch.definition.signature.output_arg] return gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name = name) then_out = then_branch.structured_outputs else_out = else_branch.structured_outputs nest.assert_same_structure(then_out, else_out, expand_composites = True) tlist = nest.flatten(then_branch.output_dtypes) ret = gen_functional_ops._if(cond, inputs, tlist, then_branch, else_branch, name = name) return nest.pack_sequence_as(then_out, ret, expand_composites = True)", - "docstring": "output = Cond(inputs) ? then_branch(inputs) : else_branch(inputs). Args: cond: A . A scalar. If the scalar is not a boolean, the scalar is converted to a boolean according to the following rule: if the scalar is a numerical value, non-zero means True and zero means False; if the scalar is a string, non-empty means True and empty means False. inputs: A list of input tensors. 
then_branch: A function takes 'inputs' and returns a list of tensors, whose types are the same as what else_branch returns. else_branch: A function takes 'inputs' and returns a list of tensors. whose types are the same as what then_branch returns. name: A name for the operation (optional). Returns: A list of tensors returned by either then_branch(inputs) or else_branch(inputs).", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\functional_ops.py", - "ast_data": "FunctionDef name:If arguments arg:cond arg:inputs arg:then_branch arg:else_branch arg:name If Call call:isinstance Assign Return return:yes Assign Assign Assign Call call:flatten Assign Call call:_if Return return:yes" - }, - { - "library": "pandas", - "name": "start", - "source_code": "@property def start(self) -> int: return self._range.start", - "docstring": "The value of the ``start`` parameter. Examples -------- >>> idx = pd.RangeIndex(5) >>> idx.start 0 >>> idx = pd.RangeIndex(2, -10, -3) >>> idx.start 2", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\range.py", - "ast_data": "FunctionDef name:start arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "validate_uuid_representation", - "source_code": "def validate_uuid_representation(dummy: Any, value: Any) -> int: try: return _UUID_REPRESENTATIONS[value] except KeyError: raise ValueError(f'{value} is an invalid UUID representation. Must be one of {tuple(_UUID_REPRESENTATIONS)}') from None", - "docstring": "Validate the uuid representation option selected in the URI.", - "type": "function", - "file_path": "mongo\\pymongo\\common.py", - "ast_data": "FunctionDef name:validate_uuid_representation arguments arg:dummy type:Any arg:value type:Any Try Return return:yes ExceptHandler Raise raises:ValueError(f'{value} is an invalid UUID representation. Must be one of {tuple(_UUID_REPRESENTATIONS)}')" - }, - { - "library": "algorithms", - "name": "ternary_search", - "source_code": "def ternary_search(left, right, key, arr): while right >= left: mid1 = left + (right - left) // 3 mid2 = right - (right - left) // 3 if key == arr[mid1]: return mid1 if key == mid2: return mid2 if key < arr[mid1]: right = mid1 - 1 elif key > arr[mid2]: left = mid2 + 1 else: left = mid1 + 1 right = mid2 - 1 return -1", - "docstring": "Find the given value (key) in an array sorted in ascending order. Returns the index of the value if found, and -1 otherwise. If the index is not in the range left..right (ie. 
left <= index < right) returns -1.", - "type": "function", - "file_path": "algorithms\\algorithms\\search\\ternary_search.py", - "ast_data": "FunctionDef name:ternary_search arguments arg:left arg:right arg:key arg:arr While Compare op:GtE Assign Assign If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Lt Assign If Compare op:Gt Assign Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_xy2", - "source_code": "def set_xy2(self, *args, **kwargs): if self._slope is None: params = _api.select_matching_signature([lambda self, x, y: locals(), lambda self, xy2: locals()], self, *args, **kwargs) if 'x' in params: _api.warn_deprecated('3.10', message = 'Passing x and y separately to AxLine.set_xy2 is deprecated since %(since)s; pass them as a single tuple instead.') xy2 = (params['x'], params['y']) else: xy2 = params['xy2'] self._xy2 = xy2 else: raise ValueError(\"Cannot set an 'xy2' value while 'slope' is set; they differ but their functionalities overlap\")", - "docstring": "Set the *xy2* value of the line. .. note:: You can only set *xy2* if the line was created using the *xy2* parameter. If the line was created using *slope*, please use . Parameters ---------- xy2 : tuple[float, float] Points for the line to pass through.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\lines.py", - "ast_data": "FunctionDef name:set_xy2 arguments arg:self vararg:args kwarg:kwargs If Compare op:Is Assign Call call:select_matching_signature If Compare op:In Assign Assign Assign Raise raises:ValueError(\"Cannot set an 'xy2' value while 'slope' is set; they differ but their functionalities overlap\")" - }, - { - "library": "pytorch", - "name": "static_dispatch", - "source_code": "def static_dispatch(sig: CppSignature | ExecutorchCppSignature, f: NativeFunction, backend_indices: list[BackendIndex]) -> str: if len(backend_indices) == 0 or f.manual_kernel_registration: return '' backends = [b for b in backend_indices if b.has_kernel(f)] static_block = None if len(backends) == 1: backend_metadata = backends[0].get_kernel(f) if backend_metadata: args = ', '.join((a.name for a in sig.arguments())) static_block = f'return ::{backend_metadata.cpp_namespace}::{backend_metadata.kernel}({args});' else: static_block = f'\\nET_ASSERT_UNREACHABLE_MSG(\"The number of native function(s) binding to {f.func.name} is {len(backends)}.\");\\n ' return f'\\n// {f.namespace}::{f.func}\\nTORCH_API inline {_sig_decl_wrapper(sig)} {{\\n {static_block}\\n}}\\n'", - "docstring": "For a given , find out the corresponding native function and dispatch to it. If zero or more than one native function exists, error out. A simplified version of register_dispatch_key.py Arguments: sig: A CppSignature for this native function we want to use. f: NativeFunction to generate static dispatch. backend_indices: All available backends. 
Return: C++ code to call backend-specific functions, e.g., \"return at::native::add(self, other, scale);\"", - "type": "function", - "file_path": "pytorch\\torchgen\\gen_executorch.py", - "ast_data": "FunctionDef name:static_dispatch arguments arg:sig type:CppSignature | ExecutorchCppSignature arg:f type:NativeFunction arg:backend_indices type:list[BackendIndex] If BoolOp Compare op:Eq Return return:yes Assign Assign If Compare op:Eq Assign Call call:get_kernel If Assign Call call:join Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "rsample", - "source_code": "def rsample(self, sample_shape: _size = torch.Size()) -> Tensor: raise NotImplementedError", - "docstring": "Generates a sample_shape shaped reparameterized sample or sample_shape shaped batch of reparameterized samples if the distribution parameters are batched.", - "type": "method", - "file_path": "pytorch\\torch\\distributions\\distribution.py", - "ast_data": "FunctionDef name:rsample arguments arg:self arg:sample_shape type:_size Raise raises:NotImplementedError" - }, - { - "library": "pytorch", - "name": "AsyncClosureHandler", - "source_code": "class AsyncClosureHandler(ClosureHandler): def __init__(self, max_queue_size = 100): super().__init__() self._closure_queue: Queue = Queue(int(os.environ.get('LTC_MAX_ASYNC_QUEUE', max_queue_size))) self._closure_exception: Queue = Queue() self._closure_lock = threading.Lock() self._closure_event_loop_finished = threading.Event() self._closure_event_loop = None def start_event_loop(self): if self._closure_event_loop is None: def event_loop(): while True: try: closure = self._closure_queue.get(block = True, timeout = 3) closure() self._closure_queue.task_done() except EmptyQueue: with self._closure_lock: if self._closure_queue.empty(): self._closure_event_loop_finished.set() return except Exception as e: self._closure_exception.put(e) return self._closure_event_loop = threading.Thread(target = event_loop) self._closure_event_loop.start() def run(self, closure): with self._closure_lock: self._closure_queue.put(closure, block = True) if self._closure_event_loop is None or not self._closure_event_loop.is_alive(): try: e = self._closure_exception.get(block = False) raise RuntimeError('Cannot run asynchronous closure due to previously raised exception') from e except EmptyQueue: self._closure_event_loop = None self.start_event_loop()", - "docstring": "Handler for Asynchronous Step Closures Args: max_queue_size: The maximum length of the closure queue after which the training loop will block until closures are evaluated. By default, a reasonable limit of a maximum of 100 on the queue. 
This value can be set using the environment variable.", - "type": "class", - "file_path": "pytorch\\torch\\_lazy\\closure.py", - "ast_data": "ClassDef name:AsyncClosureHandler FunctionDef name:__init__ arguments arg:self arg:max_queue_size Assign Call call:Lock Assign Call call:Event Assign FunctionDef name:start_event_loop arguments arg:self If Compare op:Is FunctionDef name:event_loop arguments While Try Assign Call call:get ExceptHandler With If Call call:empty Return return:no ExceptHandler Return return:no Assign Call call:Thread FunctionDef name:run arguments arg:self arg:closure With If BoolOp Compare op:Is Try Assign Call call:get Raise raises:RuntimeError('Cannot run asynchronous closure due to previously raised exception') ExceptHandler Assign" - }, - { - "library": "pytorch", - "name": "add_quant_dequant", - "source_code": "def add_quant_dequant(module): if has_no_children_ignoring_parametrizations(module) and hasattr(module, 'qconfig') and module.qconfig: return QuantWrapper(module) for name, child in module.named_children(): module._modules[name] = add_quant_dequant(child) return module", - "docstring": "Wrap the leaf child module in QuantWrapper if it has a valid qconfig Note that this function will modify the children of module inplace and it can return a new module which wraps the input module as well. Args: module: input module with qconfig attributes for all the leaf modules that we want to quantize Return: Either the inplace modified module with submodules wrapped in based on qconfig or a new module which wraps the input module, the latter case only happens when the input module is a leaf module and we want to quantize it.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\quantize.py", - "ast_data": "FunctionDef name:add_quant_dequant arguments arg:module If BoolOp Call call:has_no_children_ignoring_parametrizations Call call:hasattr Return return:yes For Call call:named_children Assign Call call:add_quant_dequant Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "@deprecation.deprecated('2019-01-01', 'The TensorFlow Distributions library has moved to TensorFlow Probability (https: //github.com/tensorflow/probability). You should update all references to use `tfp.distributions` instead of `tf.distributions`.', warn_once = True) def __init__(self, concentration, rate, validate_args = False, allow_nan_stats = True, name = 'Gamma'): parameters = dict(locals()) with ops.name_scope(name, values = [concentration, rate]) as name: with ops.control_dependencies([check_ops.assert_positive(concentration), check_ops.assert_positive(rate)] if validate_args else []): self._concentration = array_ops.identity(concentration, name = 'concentration') self._rate = array_ops.identity(rate, name = 'rate') check_ops.assert_same_float_dtype([self._concentration, self._rate]) super(Gamma, self).__init__(dtype = self._concentration.dtype, validate_args = validate_args, allow_nan_stats = allow_nan_stats, reparameterization_type = distribution.FULLY_REPARAMETERIZED, parameters = parameters, graph_parents = [self._concentration, self._rate], name = name)", - "docstring": "Construct Gamma with and parameters. The parameters and must be shaped in a way that supports broadcasting (e.g. is a valid operation). Args: concentration: Floating point tensor, the concentration params of the distribution(s). Must contain only positive values. rate: Floating point tensor, the inverse scale params of the distribution(s). 
Must contain only positive values. validate_args: Python , default . When distribution parameters are checked for validity despite possibly degrading runtime performance. When invalid inputs may silently render incorrect outputs. allow_nan_stats: Python , default . When , statistics (e.g., mean, mode, variance) use the value \"\" to indicate the result is undefined. When , an exception is raised if one or more of the statistic's batch members are undefined. name: Python name prefixed to Ops created by this class. Raises: TypeError: if and are different dtypes.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\gamma.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:concentration arg:rate arg:validate_args arg:allow_nan_stats arg:name Call call:deprecated Assign Call call:dict With With Assign Call call:identity Assign Call call:identity" - }, - { - "library": "cherrypy", - "name": "urljoin", - "source_code": "def urljoin(*atoms): url = '/'.join([x for x in atoms if x]) while '//' in url: url = url.replace('//', '/') return url or '/'", - "docstring": "Return the given path \\*atoms, joined into a single URL. This will correctly join a SCRIPT_NAME and PATH_INFO into the original URL, even if either atom is blank.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\httputil.py", - "ast_data": "FunctionDef name:urljoin arguments vararg:atoms Assign Call call:join While Compare op:In Assign Call call:replace Return return:yes" - }, - { - "library": "scipy", - "name": "get_numerical_endpoints", - "source_code": "def get_numerical_endpoints(self, parameter_values): a, b = self.endpoints try: if callable(a): a = a(**parameter_values) else: a = np.asarray(parameter_values.get(a, a)) if callable(b): b = b(**parameter_values) else: b = np.asarray(parameter_values.get(b, b)) except TypeError as e: message = f'The endpoints of the distribution are defined by parameters, but their values were not provided. When using a private method of {self.__class__}, pass all required distribution parameters as keyword arguments.' raise TypeError(message) from e a, b = xp_promote(a, b, force_floating = True, xp = np) return (a, b)", - "docstring": "Get the numerical values of the domain endpoints. Domain endpoints may be defined symbolically or through a callable. This returns numerical values of the endpoints given numerical values for any variables. Parameters ---------- parameter_values : dict A dictionary that maps between string variable names and numerical values of parameters, which may define the endpoints. 
Returns ------- a, b : ndarray Numerical values of the endpoints", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_distribution_infrastructure.py", - "ast_data": "FunctionDef name:get_numerical_endpoints arguments arg:self arg:parameter_values Assign Try If Call call:callable Assign Call call:a Assign Call call:asarray If Call call:callable Assign Call call:b Assign Call call:asarray ExceptHandler Assign Raise raises:TypeError(message) Assign Call call:xp_promote Return return:yes" - }, - { - "library": "tensorflow", - "name": "initial_value", - "source_code": "@property def initial_value(self): if context.executing_eagerly(): raise RuntimeError('This property is not supported when eager execution is enabled.') return self._initial_value", - "docstring": "Returns the Tensor used as the initial value for the variable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:initial_value arguments arg:self If Call call:executing_eagerly Raise raises:RuntimeError('This property is not supported when eager execution is enabled.') Return return:yes" - }, - { - "library": "pytorch", - "name": "get_sharing_strategy", - "source_code": "def get_sharing_strategy(): return _sharing_strategy", - "docstring": "Return the current strategy for sharing CPU tensors.", - "type": "function", - "file_path": "pytorch\\torch\\multiprocessing\\__init__.py", - "ast_data": "FunctionDef name:get_sharing_strategy arguments Return return:yes" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "@available_if(_check_novelty_decision_function) def decision_function(self, X): return self.score_samples(X) - self.offset_", - "docstring": "Shifted opposite of the Local Outlier Factor of X. Bigger is better, i.e. large values correspond to inliers. **Only available for novelty detection (when novelty is set to True).** The shift offset allows a zero threshold for being an outlier. The argument X is supposed to contain *new data*: if X contains a point from training, it considers the later in its own neighborhood. Also, the samples in X are not considered in the neighborhood of any point. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) The query sample or samples to compute the Local Outlier Factor w.r.t. the training samples. Returns ------- shifted_opposite_lof_scores : ndarray of shape (n_samples,) The shifted opposite of the Local Outlier Factor of each input samples. The lower, the more abnormal. Negative scores represent outliers, positive scores represent inliers.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\neighbors\\_lof.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Call call:available_if Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_bbox_to_anchor", - "source_code": "def set_bbox_to_anchor(self, bbox, transform = None): if bbox is None: self._bbox_to_anchor = None return elif isinstance(bbox, BboxBase): self._bbox_to_anchor = bbox else: try: l = len(bbox) except TypeError as err: raise ValueError(f'Invalid bbox: {bbox}') from err if l = = 2: bbox = [bbox[0], bbox[1], 0, 0] self._bbox_to_anchor = Bbox.from_bounds(*bbox) if transform is None: transform = BboxTransformTo(self.parent.bbox) self._bbox_to_anchor = TransformedBbox(self._bbox_to_anchor, transform) self.stale = True", - "docstring": "Set the bbox that the legend will be anchored to. 
Parameters ---------- bbox : or tuple The bounding box can be specified in the following ways: - A instance - A tuple of `~matplotlib.transforms.Transform`, optional A transform to apply to the bounding box. If not specified, this will use a transform to the bounding box of the parent.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\legend.py", - "ast_data": "FunctionDef name:set_bbox_to_anchor arguments arg:self arg:bbox arg:transform If Compare op:Is Assign Return return:no If Call call:isinstance Assign Try Assign Call call:len ExceptHandler Raise raises:ValueError(f'Invalid bbox: {bbox}') If Compare op:Eq Assign Assign Call call:from_bounds If Compare op:Is Assign Call call:BboxTransformTo Assign Call call:TransformedBbox Assign" - }, - { - "library": "scipy", - "name": "Problem12", - "source_code": "class Problem12(Benchmark): def __init__(self, dimensions = 1): Benchmark.__init__(self, dimensions) self._bounds = [(0, 2 * pi)] self.global_optimum = pi self.fglob = -1 def fun(self, x, *args): self.nfev + = 1 x = x[0] return sin(x) ** 3.0 + cos(x) ** 3.0", - "docstring": "Univariate Problem12 objective function. This class defines the Univariate Problem12 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Problem12}}(x) = \\sin^3(x) + \\cos^3(x) Bound constraints: :math: .. figure:: figures/Problem12.png :alt: Univariate Problem12 function :align: center **Univariate Problem12 function** *Global optimum*: :math: for :math:", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_univariate.py", - "ast_data": "ClassDef name:Problem12 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "essential_node_count", - "source_code": "def essential_node_count(self) -> int: return sum((1 for n in self.graph.nodes() if n.kind() not in self._EXCLUDED_NODE_KINDS))", - "docstring": "Return the number of nodes in the subgraph excluding those in .", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\verification.py", - "ast_data": "FunctionDef name:essential_node_count arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "is_regressor", - "source_code": "def is_regressor(estimator): if isinstance(estimator, type): warnings.warn(f'passing a class to {print(inspect.stack()[0][3])} is deprecated and will be removed in 1.8. Use an instance of the class instead.', FutureWarning) return getattr(estimator, '_estimator_type', None) = = 'regressor' return get_tags(estimator).estimator_type = = 'regressor'", - "docstring": "Return True if the given estimator is (probably) a regressor. Parameters ---------- estimator : estimator instance Estimator object to test. Returns ------- out : bool True if estimator is a regressor and False otherwise. 
Examples -------- >>> from sklearn.base import is_regressor >>> from sklearn.cluster import KMeans >>> from sklearn.svm import SVC, SVR >>> classifier = SVC() >>> regressor = SVR() >>> kmeans = KMeans() >>> is_regressor(classifier) False >>> is_regressor(regressor) True >>> is_regressor(kmeans) False", - "type": "function", - "file_path": "scikit-learn\\sklearn\\base.py", - "ast_data": "FunctionDef name:is_regressor arguments arg:estimator If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "mongo", - "name": "execute_command_unack", - "source_code": "def execute_command_unack(self, conn: Connection) -> None: db_name = 'admin' cmd_name = 'bulkWrite' listeners = self.client._event_listeners op_id = _randint() bwc = self.bulk_ctx_class(db_name, cmd_name, conn, op_id, listeners, None, self.client.codec_options) while self.idx_offset < self.total_ops: cmd = {'bulkWrite': 1} cmd['errorsOnly'] = True cmd['ordered'] = False if self.bypass_doc_val is not None: cmd['bypassDocumentValidation'] = self.bypass_doc_val cmd['writeConcern'] = {'w': 0} if self.comment: cmd['comment'] = self.comment if self.let: cmd['let'] = self.let conn.add_server_api(cmd) ops = islice(self.ops, self.idx_offset, None) namespaces = islice(self.namespaces, self.idx_offset, None) to_send_ops, _ = self._execute_batch_unack(bwc, cmd, ops, namespaces) self.idx_offset + = len(to_send_ops)", - "docstring": "Execute commands with OP_MSG and w=0 writeConcern. Always unordered.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\client_bulk.py", - "ast_data": "FunctionDef name:execute_command_unack arguments arg:self arg:conn type:Connection Assign Assign Assign Assign Call call:_randint Assign Call call:bulk_ctx_class While Compare op:Lt Assign Assign Assign If Compare op:IsNot Assign Assign If Assign If Assign Assign Call call:islice Assign Call call:islice Assign Call call:_execute_batch_unack" - }, - { - "library": "tensorflow", - "name": "sparse_read", - "source_code": "def sparse_read(self, indices, name = None): with ops.name_scope('Gather' if name is None else name) as name: variable_accessed(self) value = gen_resource_variable_ops.resource_gather(self.handle, indices, dtype = self._dtype, name = name) if self._dtype = = dtypes.variant: handle_data = get_eager_safe_handle_data(self.handle) if handle_data.is_set and len(handle_data.shape_and_type) > 1: value._handle_data = cpp_shape_inference_pb2.CppShapeInferenceResult.HandleData(is_set = True, shape_and_type = handle_data.shape_and_type[1:]) return array_ops.identity(value) return value", - "docstring": "Reads the value of this variable sparsely, using .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:sparse_read arguments arg:self arg:indices arg:name With Assign Call call:resource_gather If Compare op:Eq Assign Call call:get_eager_safe_handle_data If BoolOp Compare op:Gt Assign Call call:HandleData Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "var", - "source_code": "@final def var(self, ddof: int = 1, numeric_only: bool = False): return self._downsample('var', ddof = ddof, numeric_only = numeric_only)", - "docstring": "Compute variance of groups, excluding missing values. Parameters ---------- ddof : int, default 1 Degrees of freedom. numeric_only : bool, default False Include only , or data. .. versionadded:: 1.5.0 .. versionchanged:: 2.0.0 numeric_only now defaults to ``. 
Returns ------- DataFrame or Series Variance of values within each group. See Also -------- core.resample.Resampler.std : Compute standard deviation of groups, excluding missing values. core.resample.Resampler.mean : Compute mean of groups, excluding missing values. core.resample.Resampler.median : Compute median of groups, excluding missing values. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 8], ... index=pd.DatetimeIndex( ... [ ... \"2023-01-01\", ... \"2023-01-10\", ... \"2023-01-15\", ... \"2023-02-01\", ... \"2023-02-10\", ... \"2023-02-15\", ... ] ... ), ... ) >>> ser.resample(\"MS\").var() 2023-01-01 1.0 2023-02-01 7.0 Freq: MS, dtype: float64 >>> ser.resample(\"MS\").var(ddof=0) 2023-01-01 0.666667 2023-02-01 4.666667 Freq: MS, dtype: float64", - "type": "method", - "file_path": "pandas\\pandas\\core\\resample.py", - "ast_data": "FunctionDef name:var arguments arg:self arg:ddof type:int arg:numeric_only type:bool Return return:yes" - }, - { - "library": "scikit-learn", - "name": "is_writeable_array", - "source_code": "def is_writeable_array(x: object) -> bool: cls = cast(Hashable, type(x)) if _issubclass_fast(cls, 'numpy', 'ndarray'): return cast('npt.NDArray', x).flags.writeable res = _is_writeable_cls(cls) if res is not None: return res return hasattr(x, '__array_namespace__')", - "docstring": "Return False if `x` is not an array API compatible object. Warning ------- As there is no standard way to check if an array is writeable without actually writing to it, this function blindly returns True for all unknown array types.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\common\\_helpers.py", - "ast_data": "FunctionDef name:is_writeable_array arguments arg:x type:object Assign Call call:cast If Call call:_issubclass_fast Return return:yes Assign Call call:_is_writeable_cls If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "ExecutionState", - "source_code": "class ExecutionState(Enum): NONE = auto() WARMUP = auto() RECORDING = auto() EXECUTION = auto()", - "docstring": "Represents the state of the CUDAGraph Tree. Will be None if there is no live current memory allocated in the cuda graph pool. Otherwise will reflect the state of the most recently executed node.", - "type": "class", - "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", - "ast_data": "ClassDef name:ExecutionState Assign Call call:auto Assign Call call:auto Assign Call call:auto Assign Call call:auto" - }, - { - "library": "mongo", - "name": "AsyncDatabaseChangeStream", - "source_code": "class AsyncDatabaseChangeStream(AsyncChangeStream[_DocumentType]): _target: AsyncDatabase[_DocumentType] @property def _aggregation_command_class(self) -> Type[_DatabaseAggregationCommand]: return _DatabaseAggregationCommand @property def _client(self) -> AsyncMongoClient[_DocumentType]: return self._target.client", - "docstring": "A change stream that watches changes on all collections in a database. Should not be called directly by application developers. Use helper method :meth: instead. .. 
versionadded:: 3.7", - "type": "class", - "file_path": "mongo\\pymongo\\asynchronous\\change_stream.py", - "ast_data": "ClassDef name:AsyncDatabaseChangeStream FunctionDef name:_aggregation_command_class arguments arg:self Return return:yes FunctionDef name:_client arguments arg:self Return return:yes" - }, - { - "library": "prospector", - "name": "filter_messages", - "source_code": "def filter_messages(filepaths: list[Path], messages: list[Message], tools: Optional[dict[str, ToolBase]] = None, blending: bool = False, blend_combos: Optional[list[list[tuple[str, str]]]] = None) -> list[Message]: paths_to_ignore, lines_to_ignore, messages_to_ignore = get_suppressions(filepaths, messages, tools, blending, blend_combos) filtered = [] for message in messages: relative_message_path = message.location.path if message.source = = 'pylint' and message.code in ('suppressed-message', 'file-ignored'): continue if relative_message_path in paths_to_ignore: continue if relative_message_path in lines_to_ignore and message.location.line in lines_to_ignore[relative_message_path]: continue if relative_message_path in messages_to_ignore and message.location.line in messages_to_ignore[relative_message_path]: matched = False for ignore in messages_to_ignore[relative_message_path][message.location.line]: if (ignore.source is None or message.source = = ignore.source) and message.code in ignore.code: matched = True continue if matched: continue filtered.append(message) return filtered", - "docstring": "This method post-processes all messages output by all tools, in order to filter out any based on the overall output. The main aim currently is to use information about messages suppressed by pylint due to inline comments, and use that to suppress messages from other tools representing the same problem. For example: import banana # pylint:disable=unused-import In this situation, pylint will not warn about an unused import as there is inline configuration to disable the warning. Pyflakes will still raise that error, however, because it does not understand pylint disabling messages. This method uses the information about suppressed messages from pylint to squash the unwanted redundant error from pyflakes and frosted.", - "type": "function", - "file_path": "prospector\\prospector\\postfilter.py", - "ast_data": "FunctionDef name:filter_messages arguments arg:filepaths type:list[Path] arg:messages type:list[Message] arg:tools type:Optional[dict[str, ToolBase]] arg:blending type:bool arg:blend_combos type:Optional[list[list[tuple[str, str]]]] Assign Call call:get_suppressions Assign For Assign If BoolOp Compare op:Eq Compare op:In If Compare op:In If BoolOp Compare op:In Compare op:In If BoolOp Compare op:In Compare op:In Assign For If BoolOp BoolOp Compare op:Is Compare op:Eq Compare op:In Assign If Return return:yes" - }, - { - "library": "scipy", - "name": "getdtype", - "source_code": "def getdtype(dtype, a = None, default = None): if dtype is None: try: newdtype = a.dtype except AttributeError as e: if default is not None: newdtype = np.dtype(default) else: raise TypeError('could not interpret data type') from e else: newdtype = np.dtype(dtype) if newdtype not in supported_dtypes: supported_dtypes_fmt = ', '.join((t.__name__ for t in supported_dtypes)) raise ValueError(f'scipy.sparse does not support dtype {newdtype}. The only supported types are: {supported_dtypes_fmt}.') return newdtype", - "docstring": "Form a supported numpy dtype based on input arguments. 
Returns a valid `dtypedefault`: bool_, int8, uint8, int16, uint16, int32, uint32, int64, uint64, longlong, ulonglong, float32, float64, longdouble, complex64, complex128, clongdouble", - "type": "function", - "file_path": "scipy\\scipy\\sparse\\_sputils.py", - "ast_data": "FunctionDef name:getdtype arguments arg:dtype arg:a arg:default If Compare op:Is Try Assign ExceptHandler If Compare op:IsNot Assign Call call:dtype Raise raises:TypeError('could not interpret data type') Assign Call call:dtype If Compare op:NotIn Assign Call call:join Raise raises:ValueError(f'scipy.sparse does not support dtype {newdtype}. The only supported types are: {supported_dtypes_fmt}.') Return return:yes" - }, - { - "library": "pandas", - "name": "data_orientation", - "source_code": "@property def data_orientation(self) -> tuple[int, ...]: return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes], [int(a.axis) for a in self.index_axes]))", - "docstring": "return a tuple of my permutated axes, non_indexable at the front", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:data_orientation arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_configuration_from_input_tensors", - "source_code": "def set_configuration_from_input_tensors(self, input_tensors): if len(input_tensors) ! = self.number_of_tuple_elements: raise ValueError(f'input_tensors is {str(input_tensors)}, but should be a list of {self.number_of_tuple_elements} Tensors') self.set_tuple_shapes([t.shape for t in input_tensors]) self.set_tuple_types([t.dtype for t in input_tensors])", - "docstring": "Sets the shapes and types of the queue tuple elements. input_tensors is a list of Tensors whose types and shapes are used to set the queue configuration. Args: input_tensors: list of Tensors of the same types and shapes as the desired queue Tuple. 
Raises: ValueError: if input_tensors is not a list of length self.number_of_tuple_elements", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", - "ast_data": "FunctionDef name:set_configuration_from_input_tensors arguments arg:self arg:input_tensors If Compare op:NotEq Raise raises:ValueError(f'input_tensors is {str(input_tensors)}, but should be a list of {self.number_of_tuple_elements} Tensors')" - }, - { - "library": "tensorflow", - "name": "replace_inplace", - "source_code": "def replace_inplace(directory, search, to_replace) -> None: for root, _, files in os.walk(directory): for file_name in files: if file_name.endswith('.py'): file_path = os.path.join(root, file_name) with open(file_path, 'r', encoding = 'utf-8') as file: filedata = file.read() if search in filedata: filedata = filedata.replace(search, to_replace) with open(file_path, 'w') as file: file.write(filedata)", - "docstring": "Traverse the directory and replace search phrase in each file.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\pip_package\\utils\\utils.py", - "ast_data": "FunctionDef name:replace_inplace arguments arg:directory arg:search arg:to_replace For Call call:walk For If Call call:endswith Assign Call call:join With Assign Call call:read If Compare op:In Assign Call call:replace With" - }, - { - "library": "django", - "name": "add_arguments", - "source_code": "def add_arguments(self, parser): pass", - "docstring": "Entry point for subclassed commands to add custom arguments.", - "type": "method", - "file_path": "django\\django\\core\\management\\base.py", - "ast_data": "FunctionDef name:add_arguments arguments arg:self arg:parser" - }, - { - "library": "kornia", - "name": "rescale", - "source_code": "def rescale(input: Tensor, factor: Union[float, Tuple[float, float]], interpolation: str = 'bilinear', align_corners: Optional[bool] = None, antialias: bool = False) -> Tensor: if isinstance(factor, float): factor_vert = factor_horz = factor else: factor_vert, factor_horz = factor height, width = input.size()[-2:] size = (int(height * factor_vert), int(width * factor_horz)) return resize(input, size, interpolation = interpolation, align_corners = align_corners, antialias = antialias)", - "docstring": "Rescale the input Tensor with the given factor. .. image:: _static/img/rescale.png Args: input: The image tensor to be scale with shape of :math:. factor: Desired scaling factor in each direction. If scalar, the value is used for both the x- and y-direction. interpolation: algorithm used for upsampling: ``. antialias: if True, then image will be filtered with Gaussian before downscaling. No effect for upscaling. Returns: The rescaled tensor with the shape as the specified size. 
Example: >>> img = torch.rand(1, 3, 4, 4) >>> out = rescale(img, (2, 3)) >>> print(out.shape) torch.Size([1, 3, 8, 12])", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\transform\\affwarp.py", - "ast_data": "FunctionDef name:rescale arguments arg:input type:Tensor arg:factor type:Union[float, Tuple[float, float]] arg:interpolation type:str arg:align_corners type:Optional[bool] arg:antialias type:bool If Call call:isinstance Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "bessel_i0e", - "source_code": "@tf_export('math.bessel_i0e', 'math.special.bessel_i0e') @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support def bessel_i0e(x, name = None): with ops.name_scope(name, 'bessel_i0e', [x]): return gen_special_math_ops.bessel_i0e(x)", - "docstring": "Computes the Bessel i0e function of element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_i0e([-1., -0.5, 0.5, 1.]).numpy() array([0.46575961, 0.64503527, 0.64503527, 0.46575961], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.i0e @end_compatibility", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", - "ast_data": "FunctionDef name:bessel_i0e arguments arg:x arg:name Call call:tf_export With Return return:yes" - }, - { - "library": "pytorch", - "name": "make_cell", - "source_code": "def make_cell(val = None): x = val def f(): return x assert f.__closure__ is not None and len(f.__closure__) = = 1 return f.__closure__[0]", - "docstring": "Some black magic to create a cell object that usually only exists in a closure", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\utils.py", - "ast_data": "FunctionDef name:make_cell arguments arg:val Assign FunctionDef name:f arguments Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "call", - "source_code": "def call(self, inputs, state): _check_rnn_cell_input_dtypes([inputs, state]) sigmoid = math_ops.sigmoid one = constant_op.constant(1, dtype = dtypes.int32) if self._state_is_tuple: c, h = state else: c, h = array_ops.split(value = state, num_or_size_splits = 2, axis = one) gate_inputs = math_ops.matmul(array_ops.concat([inputs, h], 1), self._kernel) gate_inputs = nn_ops.bias_add(gate_inputs, self._bias) i, j, f, o = array_ops.split(value = gate_inputs, num_or_size_splits = 4, axis = one) forget_bias_tensor = constant_op.constant(self._forget_bias, dtype = f.dtype) add = math_ops.add multiply = math_ops.multiply new_c = add(multiply(c, sigmoid(add(f, forget_bias_tensor))), multiply(sigmoid(i), self._activation(j))) new_h = multiply(self._activation(new_c), sigmoid(o)) if self._state_is_tuple: new_state = LSTMStateTuple(new_c, new_h) else: new_state = array_ops.concat([new_c, new_h], 1) return (new_h, new_state)", - "docstring": "Long short-term memory cell (LSTM). Args: inputs: tensor with shape . state: An of state tensors, each shaped , if has been set to . Otherwise, a shaped . 
Returns: A pair containing the new hidden state, and the new state (either a or a concatenated state, depending on ).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\layers\\legacy_rnn\\rnn_cell_impl.py", - "ast_data": "FunctionDef name:call arguments arg:self arg:inputs arg:state Assign Assign Call call:constant If Assign Assign Call call:split Assign Call call:matmul Assign Call call:bias_add Assign Call call:split Assign Call call:constant Assign Assign Assign Call call:add Assign Call call:multiply If Assign Call call:LSTMStateTuple Assign Call call:concat Return return:yes" - }, - { - "library": "scipy", - "name": "sphere_intersections", - "source_code": "def sphere_intersections(z, d, trust_radius, entire_line = False): if norm(d) = = 0: return (0, 0, False) if np.isinf(trust_radius): if entire_line: ta = -np.inf tb = np.inf else: ta = 0 tb = 1 intersect = True return (ta, tb, intersect) a = np.dot(d, d) b = 2 * np.dot(z, d) c = np.dot(z, z) - trust_radius ** 2 discriminant = b * b - 4 * a * c if discriminant < 0: intersect = False return (0, 0, intersect) sqrt_discriminant = np.sqrt(discriminant) aux = b + copysign(sqrt_discriminant, b) ta = -aux / (2 * a) tb = -2 * c / aux ta, tb = sorted([ta, tb]) if entire_line: intersect = True elif tb < 0 or ta > 1: intersect = False ta = 0 tb = 0 else: intersect = True ta = max(0, ta) tb = min(1, tb) return (ta, tb, intersect)", - "docstring": "Find the intersection between segment (or line) and spherical constraints. Find the intersection between the segment (or line) defined by the parametric equation ``, there is no intersection.", - "type": "function", - "file_path": "scipy\\scipy\\optimize\\_trustregion_constr\\qp_subproblem.py", - "ast_data": "FunctionDef name:sphere_intersections arguments arg:z arg:d arg:trust_radius arg:entire_line If Compare op:Eq Return return:yes If Call call:isinf If Assign Assign Assign Assign Assign Return return:yes Assign Call call:dot Assign Assign Assign If Compare op:Lt Assign Return return:yes Assign Call call:sqrt Assign Assign Assign Assign Call call:sorted If Assign If BoolOp Compare op:Lt Compare op:Gt Assign Assign Assign Assign Assign Call call:max Assign Call call:min Return return:yes" - }, - { - "library": "scipy", - "name": "asfptype", - "source_code": "def asfptype(self): return self._asfptype()", - "docstring": "Upcast matrix to a floating point format (if necessary)", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\_matrix.py", - "ast_data": "FunctionDef name:asfptype arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "tick_left", - "source_code": "def tick_left(self): label = True if 'label1On' in self._major_tick_kw: label = self._major_tick_kw['label1On'] or self._major_tick_kw['label2On'] self.set_ticks_position('left') self.set_tick_params(which = 'both', labelleft = label)", - "docstring": "Move ticks and ticklabels (if present) to the left of the Axes.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axis.py", - "ast_data": "FunctionDef name:tick_left arguments arg:self Assign If Compare op:In Assign BoolOp" - }, - { - "library": "kornia", - "name": "__mul__", - "source_code": "def __mul__(self, right: Se3) -> Se3 | Vector3 | Tensor: so3 = self.so3 t = self.t if isinstance(right, Se3): return self._mul_se3(right) elif isinstance(right, (Vector3, Tensor)): return so3 * right + t.data else: raise TypeError(f'Unsupported type: {type(right)}')", - "docstring": "Compose two Se3 transformations. 
Args: right: the other Se3 transformation. Return: The resulting Se3 transformation.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", - "ast_data": "FunctionDef name:__mul__ arguments arg:self arg:right type:Se3 Assign Assign If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Raise raises:TypeError(f'Unsupported type: {type(right)}')" - }, - { - "library": "numpy", - "name": "geterrcall", - "source_code": "@set_module('numpy') def geterrcall(): return _get_extobj_dict()['call']", - "docstring": "Return the current callback function used on floating-point errors. When the error handling for a floating-point error (one of \"divide\", \"over\", \"under\", or \"invalid\") is set to 'call' or 'log', the function that is called or the log instance that is written to is returned by . This function or log instance has been set with . Returns ------- errobj : callable, log instance or None The current error handler. If no handler was set through , `seterr`. Examples -------- >>> import numpy as np >>> np.geterrcall() # we did not yet set a handler, returns None >>> orig_settings = np.seterr(all='call') >>> def err_handler(type, flag): ... print(\"Floating point error (%s), with flag %s\" % (type, flag)) >>> old_handler = np.seterrcall(err_handler) >>> np.array([1, 2, 3]) / 0.0 Floating point error (divide by zero), with flag 1 array([inf, inf, inf]) >>> cur_handler = np.geterrcall() >>> cur_handler is err_handler True >>> old_settings = np.seterr(**orig_settings) # restore original >>> old_handler = np.seterrcall(None) # restore original", - "type": "function", - "file_path": "numpy\\numpy\\_core\\_ufunc_config.py", - "ast_data": "FunctionDef name:geterrcall arguments Call call:set_module Return return:yes" - }, - { - "library": "flexx", - "name": "pointer_click", - "source_code": "@event.emitter def pointer_click(self, e): return self._create_pointer_event(e)", - "docstring": "Event emitted when mouse-button/touchpad/screen is clicked. 
See pointer_down() for a description of the event object.", - "type": "method", - "file_path": "flexx\\flexx\\ui\\_widget.py", - "ast_data": "FunctionDef name:pointer_click arguments arg:self arg:e Return return:yes" - }, - { - "library": "pytorch", - "name": "normalize_to_torch_size", - "source_code": "def normalize_to_torch_size(size) -> torch.Size: if isinstance(size, torch.Size): return size if isinstance(size, int): torch_size = [size] elif len(size) = = 1 and isinstance(size[0], Sequence): torch_size = list(size[0]) else: torch_size = list(size) return torch.Size(torch_size)", - "docstring": "Unify variable types of size argument to torch.Size Acceptable types include: int, Sequence[int], Tuple[int], Tuple[Sequence[int]], or torch.Size", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\tensor\\_utils.py", - "ast_data": "FunctionDef name:normalize_to_torch_size arguments arg:size If Call call:isinstance Return return:yes If Call call:isinstance Assign If BoolOp Compare op:Eq Call call:isinstance Assign Call call:list Assign Call call:list Return return:yes" - }, - { - "library": "django", - "name": "ConcatPair", - "source_code": "class ConcatPair(Func): function = 'CONCAT' def pipes_concat_sql(self, compiler, connection, **extra_context): coalesced = self.coalesce() return super(ConcatPair, coalesced).as_sql(compiler, connection, template = '(%(expressions)s)', arg_joiner = ' || ', **extra_context) as_sqlite = pipes_concat_sql def as_postgresql(self, compiler, connection, **extra_context): c = self.copy() c.set_source_expressions([expression if isinstance(expression.output_field, (CharField, TextField)) else Cast(expression, TextField()) for expression in c.get_source_expressions()]) return c.pipes_concat_sql(compiler, connection, **extra_context) def as_mysql(self, compiler, connection, **extra_context): return super().as_sql(compiler, connection, function = 'CONCAT_WS', template = \"%(function)s('', %(expressions)s)\", **extra_context) def coalesce(self): c = self.copy() c.set_source_expressions([Coalesce(expression, Value('')) for expression in c.get_source_expressions()]) return c", - "docstring": "Concatenate two arguments together. This is used by because not all backend databases support more than two arguments.", - "type": "class", - "file_path": "django\\django\\db\\models\\functions\\text.py", - "ast_data": "ClassDef name:ConcatPair Assign FunctionDef name:pipes_concat_sql arguments arg:self arg:compiler arg:connection kwarg:extra_context Assign Call call:coalesce Return return:yes Assign FunctionDef name:as_postgresql arguments arg:self arg:compiler arg:connection kwarg:extra_context Assign Call call:copy Return return:yes FunctionDef name:as_mysql arguments arg:self arg:compiler arg:connection kwarg:extra_context Return return:yes FunctionDef name:coalesce arguments arg:self Assign Call call:copy Return return:yes" - }, - { - "library": "scikit-learn", - "name": "split", - "source_code": "def split(self, X, y = None, groups = None): if groups is not None: warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning) return super().split(X, y, groups = groups)", - "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) The target variable for supervised learning problems. groups : object Always ignored, exists for compatibility. 
Yields ------ train : ndarray The training set indices for that split. test : ndarray The testing set indices for that split.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", - "ast_data": "FunctionDef name:split arguments arg:self arg:X arg:y arg:groups If Compare op:IsNot Return return:yes" - }, - { - "library": "pandas", - "name": "describe", - "source_code": "def describe(self) -> DataFrame: counts = self.value_counts(dropna = False) freqs = counts / counts.sum() from pandas import Index from pandas.core.reshape.concat import concat result = concat([counts, freqs], ignore_index = True, axis = 1) result.columns = Index(['counts', 'freqs']) result.index.name = 'categories' return result", - "docstring": "Describes this Categorical Returns ------- description: A dataframe with frequency and counts by category.", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\categorical.py", - "ast_data": "FunctionDef name:describe arguments arg:self Assign Call call:value_counts Assign Assign Call call:concat Assign Call call:Index Assign Return return:yes" - }, - { - "library": "sphinx", - "name": "get_numfig_title", - "source_code": "def get_numfig_title(self, node: Node) -> str | None: if self.is_enumerable_node(node): elem = cast('Element', node) _, title_getter = self.enumerable_nodes.get(elem.__class__, (None, None)) if title_getter: return title_getter(elem) else: for subnode in elem: if isinstance(subnode, nodes.caption | nodes.title): return clean_astext(subnode) return None", - "docstring": "Get the title of enumerable nodes to refer them using its title", - "type": "method", - "file_path": "sphinx\\sphinx\\domains\\std\\__init__.py", - "ast_data": "FunctionDef name:get_numfig_title arguments arg:self arg:node type:Node If Call call:is_enumerable_node Assign Call call:cast Assign Call call:get If Return return:yes For If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "compile_submod", - "source_code": "def compile_submod(self, input_mod, args, kwargs): assert len(kwargs) = = 0, 'We assume only args for these modules' class WrapperModule(torch.nn.Module): def __init__(self, submod, unwrap_singleton_tuple) -> None: super().__init__() self.submod = submod self.unwrap_singleton_tuple = unwrap_singleton_tuple def forward(self, *args): x = self.submod(*args) if self.unwrap_singleton_tuple and isinstance(x, (tuple, list)): return x[0] return x unwrap_singleton_tuple = False for sn in input_mod.graph.nodes: if sn.op = = 'output': if not isinstance(sn.args[0], tuple): unwrap_singleton_tuple = True sn.args = (sn.args,) input_mod.recompile() input_mod.compile_subgraph_reason = GraphCompileReason('DDPOptimizer intentional graph-break (See Note [DDPOptimizer]). 
Set `torch._dynamo.config.optimize_ddp = False` to disable.', [traceback.FrameSummary(__file__, 0, DDPOptimizer)]) wrapper = WrapperModule(self.compiler(input_mod, args), unwrap_singleton_tuple) return wrapper", - "docstring": "Compile the submodule, using a wrapper to make sure its output is always a tuple, which is required by AotAutograd based compilers", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\backends\\distributed.py", - "ast_data": "FunctionDef name:compile_submod arguments arg:self arg:input_mod arg:args arg:kwargs ClassDef name:WrapperModule FunctionDef name:__init__ arguments arg:self arg:submod arg:unwrap_singleton_tuple Assign Assign FunctionDef name:forward arguments arg:self vararg:args Assign Call call:submod If BoolOp Call call:isinstance Return return:yes Return return:yes Assign For If Compare op:Eq If Assign Assign Assign Call call:GraphCompileReason Assign Call call:WrapperModule Return return:yes" - }, - { - "library": "tensorflow", - "name": "stateless_random_uniform", - "source_code": "@polymorphic_function.function def stateless_random_uniform(shape, seed, layout): return api.relayout(stateless_random_ops.stateless_random_uniform(shape = shape, seed = seed), layout = layout)", - "docstring": "Creates uniform random tensor with the given layout.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\numpy_util.py", - "ast_data": "FunctionDef name:stateless_random_uniform arguments arg:shape arg:seed arg:layout Return return:yes" - }, - { - "library": "pytorch", - "name": "collect_bw_donated_buffer_idxs", - "source_code": "def collect_bw_donated_buffer_idxs(fw_module: torch.fx.GraphModule, bw_module: torch.fx.GraphModule, fw_metadata: ViewAndMutationMeta) -> list[int]: if contain_metadata_mutation_ops(fw_module) or contain_metadata_mutation_ops(bw_module): return [] fw_ins = fw_module.graph.find_nodes(op = 'placeholder') bw_outs = next(reversed(bw_module.graph.find_nodes(op = 'output'))).args[0] fw_outs = next(reversed(fw_module.graph.find_nodes(op = 'output'))).args[0] fw_ins = [n.meta['val'] if hasattr(n, 'meta') and 'val' in n.meta else None for n in fw_ins] fw_outs = [n.meta['val'] if hasattr(n, 'meta') and 'val' in n.meta else None for n in fw_outs] bw_outs = [n.meta['val'] if hasattr(n, 'meta') and 'val' in n.meta else None for n in bw_outs] user_fw_outs = fw_outs[: fw_metadata.num_forward] saved_tensors = fw_outs[fw_metadata.tensors_saved_for_backwards_slice] fw_donated_buffer = collect_fw_donated_buffer_idxs(fw_ins, user_fw_outs, bw_outs, saved_tensors) assert fw_metadata.num_symints_saved_for_bw is not None return [fw_metadata.num_symints_saved_for_bw + i for i in fw_donated_buffer]", - "docstring": "Collects backward donated buffer indexes from fw_module and bw_module.", - "type": "function", - "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\jit_compile_runtime_wrappers.py", - "ast_data": "FunctionDef name:collect_bw_donated_buffer_idxs arguments arg:fw_module type:torch.fx.GraphModule arg:bw_module type:torch.fx.GraphModule arg:fw_metadata type:ViewAndMutationMeta If BoolOp Call call:contain_metadata_mutation_ops Call call:contain_metadata_mutation_ops Return return:yes Assign Call call:find_nodes Assign Assign Assign Assign Assign Assign Assign Assign Call call:collect_fw_donated_buffer_idxs Return return:yes" - }, - { - "library": "authlib", - "name": "create_authorization_url", - "source_code": "def create_authorization_url(self, url, request_token = None, **kwargs): kwargs['oauth_token'] = 
request_token or self.auth.token if self.auth.redirect_uri: kwargs['oauth_callback'] = self.auth.redirect_uri return add_params_to_uri(url, kwargs.items())", - "docstring": "Create an authorization URL by appending request_token and optional kwargs to url. This is the second step in the OAuth 1 workflow. The user should be redirected to this authorization URL, grant access to you, and then be redirected back to you. The redirection back can either be specified during client registration or by supplying a callback URI per request. :param url: The authorization endpoint URL. :param request_token: The previously obtained request token. :param kwargs: Optional parameters to append to the URL. :returns: The authorization URL with new parameters embedded.", - "type": "method", - "file_path": "authlib\\authlib\\oauth1\\client.py", - "ast_data": "FunctionDef name:create_authorization_url arguments arg:self arg:url arg:request_token kwarg:kwargs Assign BoolOp If Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict_log_proba", - "source_code": "@available_if(_check_proba) def predict_log_proba(self, X): return np.log(self.predict_proba(X))", - "docstring": "Compute log probabilities of possible outcomes for samples in X. The model need to have probability information computed at training time: fit with attribute set to True. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples_test, n_samples_train) For kernel=\"precomputed\", the expected shape of X is (n_samples_test, n_samples_train). Returns ------- T : ndarray of shape (n_samples, n_classes) Returns the log-probabilities of the sample for each class in the model. The columns correspond to the classes in sorted order, as they appear in the attribute :term:. Notes ----- The probability model is created using cross validation, so the results can be slightly different than those obtained by predict. Also, it will produce meaningless results on very small datasets.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\svm\\_base.py", - "ast_data": "FunctionDef name:predict_log_proba arguments arg:self arg:X Call call:available_if Return return:yes" - }, - { - "library": "tensorflow", - "name": "enable_resource_variables", - "source_code": "@tf_export(v1 = ['enable_resource_variables']) def enable_resource_variables() -> None: global _DEFAULT_USE_RESOURCE _DEFAULT_USE_RESOURCE = True logging.vlog(1, 'Enabling resource variables') _api_usage_gauge.get_cell().set(True)", - "docstring": "Creates resource variables by default. Resource variables are improved versions of TensorFlow variables with a well-defined memory model. Accessing a resource variable reads its value, and all ops which access a specific read value of the variable are guaranteed to see the same value for that tensor. Writes which happen after a read (by having a control or data dependency on the read) are guaranteed not to affect the value of the read tensor, and similarly writes which happen before a read are guaranteed to affect the value. No guarantees are made about unordered read/write pairs. 
Calling tf.enable_resource_variables() lets you opt-in to this TensorFlow 2.0 feature.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variables_toggle.py", - "ast_data": "FunctionDef name:enable_resource_variables arguments Call call:tf_export Assign" - }, - { - "library": "tensorflow", - "name": "append", - "source_code": "def append(self, item): self._items.append(item)", - "docstring": "Append an item to the Menu. Args: item: (MenuItem) the item to be appended.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:append arguments arg:self arg:item" - }, - { - "library": "mongo", - "name": "generation_time", - "source_code": "@property def generation_time(self) -> datetime.datetime: timestamp = _UNPACK_INT(self.__id[0: 4])[0] return datetime.datetime.fromtimestamp(timestamp, utc)", - "docstring": "A :class: instance representing the time of generation for this :class:. The :class: is timezone aware, and represents the generation time in UTC. It is precise to the second.", - "type": "method", - "file_path": "mongo\\bson\\objectid.py", - "ast_data": "FunctionDef name:generation_time arguments arg:self Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "KeyEvent", - "source_code": "class KeyEvent(LocationEvent): def __init__(self, name, canvas, key, x = 0, y = 0, guiEvent = None): super().__init__(name, canvas, x, y, guiEvent = guiEvent) self.key = key", - "docstring": "A key event (key press, key release). A KeyEvent has a number of special attributes in addition to those defined by the parent and classes. Attributes ---------- key : None or str The key(s) pressed. Could be *None*, a single case sensitive Unicode character (\"g\", \"G\", \"#\", etc.), a special key (\"control\", \"shift\", \"f1\", \"up\", etc.) or a combination of the above (e.g., \"ctrl+alt+g\", \"ctrl+alt+G\"). Notes ----- Modifier keys will be prefixed to the pressed key and will be in the order \"ctrl\", \"alt\", \"super\". The exception to this rule is when the pressed key is itself a modifier key, therefore \"ctrl+alt\" and \"alt+control\" can both be valid key values. Examples -------- :: def on_key(event): print('you pressed', event.key, event.xdata, event.ydata) cid = fig.canvas.mpl_connect('key_press_event', on_key)", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "ClassDef name:KeyEvent FunctionDef name:__init__ arguments arg:self arg:name arg:canvas arg:key arg:x arg:y arg:guiEvent Assign" - }, - { - "library": "tensorflow", - "name": "sparse_slice", - "source_code": "@tf_export('sparse.slice', v1 = ['sparse.slice', 'sparse_slice']) @deprecation.deprecated_endpoints('sparse_slice') def sparse_slice(sp_input, start, size, name = None): sp_input = _convert_to_sparse_tensor(sp_input) start = ops.convert_to_tensor(start, dtypes.int64) size = ops.convert_to_tensor(size, dtypes.int64) with ops.name_scope(name, 'SparseSlice', [sp_input]) as name: output_indices, output_values, output_shape = gen_sparse_ops.sparse_slice(sp_input.indices, sp_input.values, sp_input.dense_shape, start, size, name = name) return sparse_tensor.SparseTensor(output_indices, output_values, output_shape)", - "docstring": "Slice a based on the and . 
For example, if the input is input_tensor = shape = [2, 7] [ a d e ] [b c ] Graphically the output tensors are: sparse.slice([0, 0], [2, 4]) = shape = [2, 4] [ a ] [b c ] sparse.slice([0, 4], [2, 3]) = shape = [2, 3] [ d e ] [ ] Args: sp_input: The to split. start: 1-D. tensor represents the start of the slice. size: 1-D. tensor represents the size of the slice. name: A name for the operation (optional). Returns: A objects resulting from splicing. Raises: TypeError: If is not a .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\sparse_ops.py", - "ast_data": "FunctionDef name:sparse_slice arguments arg:sp_input arg:start arg:size arg:name Call call:tf_export Call call:deprecated_endpoints Assign Call call:_convert_to_sparse_tensor Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor With Assign Call call:sparse_slice Return return:yes" - }, - { - "library": "tensorflow", - "name": "assert_zero_imag_part", - "source_code": "def assert_zero_imag_part(x, message = None, name = 'assert_zero_imag_part'): with ops.name_scope(name, values = [x]): x = tensor_conversion.convert_to_tensor_v2_with_dispatch(x, name = 'x') dtype = x.dtype.base_dtype if dtype.is_floating: return control_flow_ops.no_op() zero = tensor_conversion.convert_to_tensor_v2_with_dispatch(0, dtype = dtype.real_dtype) return check_ops.assert_equal(zero, math_ops.imag(x), message = message)", - "docstring": "Returns that asserts Tensor has no non-zero imaginary parts. Args: x: Numeric , real, integer, or complex. message: A string message to prepend to failure message. name: A name to give this . Returns: An that asserts has no entries with modulus zero.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator_util.py", - "ast_data": "FunctionDef name:assert_zero_imag_part arguments arg:x arg:message arg:name With Assign Call call:convert_to_tensor_v2_with_dispatch Assign If Return return:yes Assign Call call:convert_to_tensor_v2_with_dispatch Return return:yes" - }, - { - "library": "pytorch", - "name": "get_lr", - "source_code": "def get_lr(self) -> list[float]: raise NotImplementedError", - "docstring": "Compute learning rate using chainable form of the scheduler.", - "type": "method", - "file_path": "pytorch\\torch\\optim\\lr_scheduler.py", - "ast_data": "FunctionDef name:get_lr arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "Reduction", - "source_code": "class Reduction(Enum): SUM = 'sum' SUM_OVER_BATCH_SIZE = 'sum_over_batch_size' WEIGHTED_MEAN = 'weighted_mean'", - "docstring": "Types of metrics reduction. Contains the following values: * : Scalar sum of weighted values. * : Scalar sum of weighted values divided by number of elements. 
* : Scalar sum of weighted values divided by sum of weights.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\metrics_utils.py", - "ast_data": "ClassDef name:Reduction Assign Assign Assign" - }, - { - "library": "sphinx", - "name": "directive", - "source_code": "def directive(self, name: str) -> type[Directive] | None: if name in self._directive_cache: return self._directive_cache[name] if name not in self.directives: return None fullname = f'{self.name}: {name}' BaseDirective = self.directives[name] class DirectiveAdapter(BaseDirective): def run(self) -> list[Node]: self.name = fullname return super().run() self._directive_cache[name] = DirectiveAdapter return DirectiveAdapter", - "docstring": "Return a directive adapter class that always gives the registered directive its full name ('domain:name') as ``.", - "type": "method", - "file_path": "sphinx\\sphinx\\domains\\__init__.py", - "ast_data": "FunctionDef name:directive arguments arg:self arg:name type:str If Compare op:In Return return:yes If Compare op:NotIn Return return:yes Assign Assign ClassDef name:DirectiveAdapter FunctionDef name:run arguments arg:self Assign Return return:yes Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "log_abs_determinant", - "source_code": "def log_abs_determinant(self, name = 'log_abs_det'): if self.is_square is False: raise NotImplementedError('Determinant not implemented for an operator that is expected to not be square.') with self._name_scope(name): return self._log_abs_determinant()", - "docstring": "Log absolute value of determinant for every batch member. Args: name: A name for this . Returns: with shape and same as . Raises: NotImplementedError: If is .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", - "ast_data": "FunctionDef name:log_abs_determinant arguments arg:self arg:name If Compare op:Is Raise raises:NotImplementedError('Determinant not implemented for an operator that is expected to not be square.') With Return return:yes" - }, - { - "library": "django", - "name": "is_same_domain", - "source_code": "def is_same_domain(host, pattern): if not pattern: return False pattern = pattern.lower() return pattern[0] = = '.' and (host.endswith(pattern) or host = = pattern[1:]) or pattern = = host", - "docstring": "Return ``). Anything else is an exact string match.", - "type": "function", - "file_path": "django\\django\\utils\\http.py", - "ast_data": "FunctionDef name:is_same_domain arguments arg:host arg:pattern If Return return:yes Assign Call call:lower Return return:yes" - }, - { - "library": "matplotlib", - "name": "disconnect", - "source_code": "def disconnect(self, cid): self._pickled_cids.discard(cid) for signal, proxy in self._func_cid_map: if self._func_cid_map[signal, proxy] = = cid: break else: return assert self.callbacks[signal][cid] = = proxy del self.callbacks[signal][cid] self._func_cid_map.pop((signal, proxy)) if len(self.callbacks[signal]) = = 0: del self.callbacks[signal]", - "docstring": "Disconnect the callback registered with callback id *cid*. 
No error is raised if such a callback does not exist.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", - "ast_data": "FunctionDef name:disconnect arguments arg:self arg:cid For If Compare op:Eq Return return:no If Compare op:Eq" - }, - { - "library": "tensorflow", - "name": "emit_counter", - "source_code": "def emit_counter(self, category: str, name: str, pid: int, timestamp: int, counter: str, value: int) -> None: event = self._create_event('C', category, name, pid, 0, timestamp) event['args'] = {counter: value} self._events.append(event)", - "docstring": "Emits a record for a single counter. Args: category: The event category as a string. name: The event name as a string. pid: Identifier of the process generating this event as an integer. timestamp: The timestamp of this event as a long integer. counter: Name of the counter as a string. value: Value of the counter as an integer.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\client\\timeline.py", - "ast_data": "FunctionDef name:emit_counter arguments arg:self arg:category type:str arg:name type:str arg:pid type:int arg:timestamp type:int arg:counter type:str arg:value type:int Assign Call call:_create_event Assign" - }, - { - "library": "pytorch", - "name": "post_compile", - "source_code": "def post_compile(wrappers: list[CompilerWrapper], compiled_fn: Callable, aot_config: AOTConfig, *, runtime_metadata: ViewAndMutationMeta) -> tuple[Callable, ViewAndMutationMeta]: for wrapper in reversed(wrappers): compiled_fn = wrapper.post_compile(compiled_fn, aot_config, runtime_metadata = runtime_metadata) return (compiled_fn, runtime_metadata)", - "docstring": "Runs a sequence of wrappers on the given function. Should be called after pre_compile()", - "type": "function", - "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py", - "ast_data": "FunctionDef name:post_compile arguments arg:wrappers type:list[CompilerWrapper] arg:compiled_fn type:Callable arg:aot_config type:AOTConfig For Call call:reversed Assign Call call:post_compile Return return:yes" - }, - { - "library": "scikit-learn", - "name": "sort", - "source_code": "def sort(x: Array, /, *, axis: int = -1, descending: py_bool = False, stable: py_bool = True) -> Array: x, restore = _ensure_single_chunk(x, axis) meta_xp = array_namespace(x._meta) x = da.map_blocks(meta_xp.sort, x, axis = axis, meta = x._meta, dtype = x.dtype, descending = descending, stable = stable) return restore(x)", - "docstring": "Array API compatibility layer around the lack of sort() in Dask. Warnings -------- This function temporarily rechunks the array along to a single chunk. This can be extremely inefficient and can lead to out-of-memory errors. 
See the corresponding documentation in the array library and/or the array API specification for more details.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_compat\\dask\\array\\_aliases.py", - "ast_data": "FunctionDef name:sort arguments Assign Call call:_ensure_single_chunk Assign Call call:array_namespace Assign Call call:map_blocks Return return:yes" - }, - { - "library": "pytorch", - "name": "current_stream", - "source_code": "def current_stream(device: Optional[_device_t] = None) -> Stream: _lazy_init() streamdata = torch._C._xpu_getCurrentStream(_get_device_index(device, optional = True)) return Stream(stream_id = streamdata[0], device_index = streamdata[1], device_type = streamdata[2])", - "docstring": "Return the currently selected :class: for a given device. Args: device (torch.device or int, optional): selected device. Returns the currently selected :class: for the current device, given by :func:, if :attr: is `` (default).", - "type": "function", - "file_path": "pytorch\\torch\\xpu\\__init__.py", - "ast_data": "FunctionDef name:current_stream arguments arg:device type:Optional[_device_t] Assign Call call:_xpu_getCurrentStream Return return:yes" - }, - { - "library": "feincms", - "name": "PreviewHandler", - "source_code": "class PreviewHandler(Handler): def get_object(self): if len(self.args) < 2: return super().get_object() page = get_object_or_404(self.page_model, pk = self.args[1]) self.request.path = page.get_absolute_url() return page def handler(self, request, *args, **kwargs): if not request.user.is_staff: raise Http404('Not found (not allowed)') response = super().handler(request, *args, **kwargs) response['Cache-Control'] = 'no-cache, must-revalidate, no-store, private' return response", - "docstring": "Preview handler The methods used in this handler should not be considered official API. *** Everything here is subject to change. ***", - "type": "class", - "file_path": "feincms\\feincms\\contrib\\preview\\views.py", - "ast_data": "ClassDef name:PreviewHandler FunctionDef name:get_object arguments arg:self If Compare op:Lt Return return:yes Assign Call call:get_object_or_404 Assign Call call:get_absolute_url Return return:yes FunctionDef name:handler arguments arg:self arg:request vararg:args kwarg:kwargs If Raise raises:Http404('Not found (not allowed)') Assign Call call:handler Assign Return return:yes" - }, - { - "library": "django", - "name": "check_password", - "source_code": "def check_password(self, raw_password): def setter(raw_password): self.set_password(raw_password) self._password = None self.save(update_fields = ['password']) return check_password(raw_password, self.password, setter)", - "docstring": "Return a boolean of whether the raw_password was correct. 
Handles hashing formats behind the scenes.", - "type": "method", - "file_path": "django\\django\\contrib\\auth\\base_user.py", - "ast_data": "FunctionDef name:check_password arguments arg:self arg:raw_password FunctionDef name:setter arguments arg:raw_password Assign Return return:yes" - }, - { - "library": "pandas", - "name": "delegate_names", - "source_code": "def delegate_names(delegate, accessors: list[str], typ: str, overwrite: bool = False, accessor_mapping: Callable[[str], str] = lambda x: x, raise_on_missing: bool = True): def add_delegate_accessors(cls): cls._add_delegate_accessors(delegate, accessors, typ, overwrite = overwrite, accessor_mapping = accessor_mapping, raise_on_missing = raise_on_missing) return cls return add_delegate_accessors", - "docstring": "Add delegated names to a class using a class decorator. This provides an alternative usage to directly calling below a class definition. Parameters ---------- delegate : object The class to get methods/properties & doc-strings. accessors : Sequence[str] List of accessor to add. typ : {'property', 'method'} overwrite : bool, default False Overwrite the method/property in the target class if it exists. accessor_mapping: Callable, default lambda x: x Callable to map the delegate's function to the cls' function. raise_on_missing: bool, default True Raise if an accessor does not exist on delegate. False skips the missing accessor. Returns ------- callable A class decorator. Examples -------- @delegate_names(Categorical, [\"categories\", \"ordered\"], \"property\") class CategoricalAccessor(PandasDelegate): [...]", - "type": "function", - "file_path": "pandas\\pandas\\core\\accessor.py", - "ast_data": "FunctionDef name:delegate_names arguments arg:delegate arg:accessors type:list[str] arg:typ type:str arg:overwrite type:bool arg:accessor_mapping type:Callable[[str], str] arg:raise_on_missing type:bool FunctionDef name:add_delegate_accessors arguments arg:cls Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "close", - "source_code": "def close(self) -> None: if self.handles is not None: self.handles.close()", - "docstring": "If we opened a stream earlier, in _get_data_from_filepath, we should close it. If an open stream or file was passed, we leave it open.", - "type": "method", - "file_path": "pandas\\pandas\\io\\json\\_json.py", - "ast_data": "FunctionDef name:close arguments arg:self If Compare op:IsNot" - }, - { - "library": "pytorch", - "name": "placements", - "source_code": "@property def placements(self) -> tuple[Placement, ...]: return self._spec.placements", - "docstring": "The placements attribute of this DTensor that describes the layout of this DTensor on the its DeviceMesh. .. note:: `` is a read-only property, it can not be set.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", - "ast_data": "FunctionDef name:placements arguments arg:self Return return:yes" - }, - { - "library": "flexx", - "name": "user_selected", - "source_code": "@event.emitter def user_selected(self, selected): d = {'old_value': self.selected, 'new_value': selected} self.set_selected(selected) return d", - "docstring": "Event emitted when the user (un)selects this item. Has `` attributes. 
One can call this emitter directly to emulate a user-selection, but note that this bypasses the max_selected policy.", - "type": "method", - "file_path": "flexx\\flexx\\ui\\widgets\\_tree.py", - "ast_data": "FunctionDef name:user_selected arguments arg:self arg:selected Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "write_filepath", - "source_code": "def write_filepath(filepath, strategy): dirpath = os.path.dirname(filepath) base = os.path.basename(filepath) return os.path.join(write_dirpath(dirpath, strategy), base)", - "docstring": "Returns the writing file path to be used to save file distributedly. Directory to contain would be created if it doesn't exist. Args: filepath: Original filepath that would be used without distribution. strategy: The tf.distribute strategy object currently used. Returns: The writing filepath that should be used to save file with distribution.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distributed_file_utils.py", - "ast_data": "FunctionDef name:write_filepath arguments arg:filepath arg:strategy Assign Call call:dirname Assign Call call:basename Return return:yes" - }, - { - "library": "scrapy", - "name": "replace", - "source_code": "def replace(self, *args: Any, cls: type[Request] | None = None, **kwargs: Any) -> Request: for x in self.attributes: kwargs.setdefault(x, getattr(self, x)) if cls is None: cls = self.__class__ return cls(*args, **kwargs)", - "docstring": "Create a new Request with the same attributes except for those given new values", - "type": "method", - "file_path": "scrapy\\scrapy\\http\\request\\__init__.py", - "ast_data": "FunctionDef name:replace arguments arg:self vararg:args kwarg:kwargs For If Compare op:Is Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_build_info", - "source_code": "@tf_export('sysconfig.get_build_info') def get_build_info(): return build_info.build_info", - "docstring": "Get a dictionary describing TensorFlow's build environment. Values are generated when TensorFlow is compiled, and are static for each TensorFlow package. The return value is a dictionary with string keys such as: - cuda_version - cudnn_version - is_cuda_build - is_rocm_build - msvcp_dll_names - nvcuda_dll_name - cudart_dll_name - cudnn_dll_name Note that the actual keys and values returned by this function is subject to change across different versions of TensorFlow or across platforms. Returns: A Dictionary describing TensorFlow's build environment.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\platform\\sysconfig.py", - "ast_data": "FunctionDef name:get_build_info arguments Call call:tf_export Return return:yes" - }, - { - "library": "tensorflow", - "name": "sample", - "source_code": "def sample(self, sample_shape = (), seed = None, name = 'sample'): return self._call_sample_n(sample_shape, seed, name)", - "docstring": "Generate samples of the specified shape. Note that a call to without arguments will generate a single sample. Args: sample_shape: 0D or 1D . Shape of the generated samples. seed: Python integer seed for RNG name: name to give to the op. 
Returns: samples: a with prepended dimensions .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", - "ast_data": "FunctionDef name:sample arguments arg:self arg:sample_shape arg:seed arg:name Return return:yes" - }, - { - "library": "pandas", - "name": "get_source_files", - "source_code": "def get_source_files(source_path: str) -> typing.Generator[str, None, None]: for root, dirs, fnames in os.walk(source_path): root_rel_path = os.path.relpath(root, source_path) for fname in fnames: yield os.path.join(root_rel_path, fname)", - "docstring": "Generate the list of files present in the source directory.", - "type": "function", - "file_path": "pandas\\web\\pandas_web.py", - "ast_data": "FunctionDef name:get_source_files arguments arg:source_path type:str For Call call:walk Assign Call call:relpath For" - }, - { - "library": "scipy", - "name": "n_th_moment", - "source_code": "def n_th_moment(n, beta, m): A = (m / beta) ** m * np.exp(-beta ** 2 / 2.0) B = m / beta - beta rhs = 2 ** ((n - 1) / 2.0) * sc.gamma((n + 1) / 2) * (1.0 + (-1) ** n * sc.gammainc((n + 1) / 2, beta ** 2 / 2)) lhs = np.zeros(rhs.shape) for k in range(int(n) + 1): lhs + = sc.binom(n, k) * B ** (n - k) * (-1) ** k / (m - k - 1) * (m / beta) ** (-m + k + 1) return A * lhs + rhs", - "docstring": "Returns n-th moment. Defined only if n+1 < m Function cannot broadcast due to the loop over n", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", - "ast_data": "FunctionDef name:n_th_moment arguments arg:n arg:beta arg:m Assign Assign Assign Assign Call call:zeros For Call call:range Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, name = None): self._name = name self._items = []", - "docstring": "Menu constructor. Args: name: (str or None) name of this menu.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\debugger_cli_common.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:name Assign Assign" - }, - { - "library": "matplotlib", - "name": "use_sticky_edges", - "source_code": "@property def use_sticky_edges(self): return self._use_sticky_edges", - "docstring": "When autoscaling, whether to obey all . Default is `autoscaleautoscale_view` is called.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:use_sticky_edges arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "download_to_stream_by_name", - "source_code": "@_csot.apply def download_to_stream_by_name(self, filename: str, destination: Any, revision: int = -1, session: Optional[ClientSession] = None) -> None: with self.open_download_stream_by_name(filename, revision, session = session) as gout: while True: chunk = gout.readchunk() if not len(chunk): break destination.write(chunk)", - "docstring": "Write the contents of (with optional ) to . For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) # Get file to write to file = open('myfile','wb') fs.download_to_stream_by_name(\"test_file\", file) Raises :exc: if no such version of that file exists. Raises :exc: if is not a string. :param filename: The name of the file to read from. :param destination: A file-like object that implements :meth:. :param revision: Which revision (documents with the same filename and different uploadDate) of the file to retrieve. Defaults to -1 (the most recent revision). 
:param session: a :class: :Note: Revision numbers are defined as follows: - 0 = the original stored file - 1 = the first revision - 2 = the second revision - etc... - -2 = the second most recent revision - -1 = the most recent revision .. versionchanged:: 3.6 Added `` parameter.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:download_to_stream_by_name arguments arg:self arg:filename type:str arg:destination type:Any arg:revision type:int arg:session type:Optional[ClientSession] With While Assign Call call:readchunk If" - }, - { - "library": "pytorch", - "name": "Thunk", - "source_code": "class Thunk(Generic[R]): f: Optional[Callable[[], R]] r: Optional[R] __slots__ = ['f', 'r'] def __init__(self, f: Callable[[], R]): self.f = f self.r = None def force(self) -> R: if self.f is None: return self.r self.r = self.f() self.f = None return self.r", - "docstring": "A simple lazy evaluation implementation that lets you delay execution of a function. It properly handles releasing the function once it is forced.", - "type": "class", - "file_path": "pytorch\\torch\\utils\\_thunk.py", - "ast_data": "ClassDef name:Thunk Assign FunctionDef name:__init__ arguments arg:self arg:f type:Callable[[], R] Assign Assign FunctionDef name:force arguments arg:self If Compare op:Is Return return:yes Assign Call call:f Assign Return return:yes" - }, - { - "library": "sphinx", - "name": "build_navpoints", - "source_code": "def build_navpoints(self, nodes: list[dict[str, Any]]) -> list[NavPoint]: navstack: list[NavPoint] = [NavPoint('dummy', 0, '', '', [])] level = 0 lastnode = None for node in nodes: if not node['text']: continue file = node['refuri'].split('#')[0] if file in self.ignored_files: continue if node['level'] > self.config.epub_tocdepth: continue if node['level'] = = level: navpoint = self.new_navpoint(node, level) navstack.pop() navstack[-1].children.append(navpoint) navstack.append(navpoint) elif node['level'] = = level + 1: level + = 1 if lastnode and self.config.epub_tocdup: navstack[-1].children.append(self.new_navpoint(lastnode, level, False)) navpoint = self.new_navpoint(node, level) navstack[-1].children.append(navpoint) navstack.append(navpoint) elif node['level'] < level: while node['level'] < len(navstack): navstack.pop() level = node['level'] navpoint = self.new_navpoint(node, level) navstack[-1].children.append(navpoint) navstack.append(navpoint) else: msg = __('node has an invalid level') raise ValueError(msg) lastnode = node return navstack[0].children", - "docstring": "Create the toc navigation structure. Subelements of a node are nested inside the navpoint. 
For nested nodes the parent node is reinserted in the subnav.", - "type": "method", - "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", - "ast_data": "FunctionDef name:build_navpoints arguments arg:self arg:nodes type:list[dict[str, Any]] Assign Assign For If Assign If Compare op:In If Compare op:Gt If Compare op:Eq Assign Call call:new_navpoint If Compare op:Eq If BoolOp Assign Call call:new_navpoint If Compare op:Lt While Compare op:Lt Assign Assign Call call:new_navpoint Assign Call call:__ Raise raises:ValueError(msg) Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_process_group", - "source_code": "@staticmethod def get_process_group(func, args) -> ProcessGroup: if func in CollectiveOp.PG_ARG_1: return ProcessGroup.unbox(args[1]) if func in CollectiveOp.PG_ARG_2: return ProcessGroup.unbox(args[2]) if func in CollectiveOp.PG_ARG_3: return _resolve_process_group(args[2]) if func in CollectiveOp.PG_ARG_4: return _resolve_process_group(args[3]) raise TypeError(f'Func {func} not found in {collective_ops}')", - "docstring": "Retrieve the process group for collective operations, except .", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_tools\\fake_collectives.py", - "ast_data": "FunctionDef name:get_process_group arguments arg:func arg:args If Compare op:In Return return:yes If Compare op:In Return return:yes If Compare op:In Return return:yes If Compare op:In Return return:yes Raise raises:TypeError(f'Func {func} not found in {collective_ops}')" - }, - { - "library": "pytorch", - "name": "dequantize_per_tensor", - "source_code": "@impl(quantized_decomposed_lib, 'dequantize_per_tensor', 'CompositeExplicitAutograd') def dequantize_per_tensor(input: torch.Tensor, scale: float, zero_point: int, quant_min: int, quant_max: int, dtype: torch.dtype, *, out_dtype: Optional[torch.dtype] = None) -> torch.Tensor: assert input.dtype = = dtype, f'Expecting input to have dtype: {dtype}, but got {input.dtype}' if out_dtype is None: out_dtype = torch.float32 if dtype in _DTYPE_TO_QVALUE_BOUNDS: return (input.to(out_dtype) - zero_point) * scale else: raise ValueError(f'Unsupported dtype in dequantize_per_tensor: {dtype}')", - "docstring": "Affine dequantization for the Tensor using the same quantization parameters to map from quantized values to floating point values Args: input (torch.Tensor): Tensor with dtype matching argument, e.g. 
(), it is a per tensor quantized Tensor if combined with quantization parameters in the argument of this function (scale/zero_point) scale (float): quantization parameter for affine quantization zero_point (int): quantization parameter for affine quantization quant_min (int): minimum quantized value for input Tensor (not used in computation, reserved for pattern matching) quant_max (int): maximum quantized value for input Tensor (not used in computation, reserved for pattern matching) dtype (torch.dtype): dtype for input Tensor (not used in computation, reserved for pattern matching) out_dtype (torch.dtype?): optional dtype for output Tensor Returns: dequantized float32 Tensor", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\_decomposed.py", - "ast_data": "FunctionDef name:dequantize_per_tensor arguments arg:input type:torch.Tensor arg:scale type:float arg:zero_point type:int arg:quant_min type:int arg:quant_max type:int arg:dtype type:torch.dtype Call call:impl If Compare op:Is Assign If Compare op:In Return return:yes Raise raises:ValueError(f'Unsupported dtype in dequantize_per_tensor: {dtype}')" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "def decision_function(self, X): check_is_fitted(self) X = self._check_X(X) n_classes = self.n_classes_ classes = self.classes_[:, np.newaxis] if n_classes = = 1: return np.zeros_like(X, shape = (X.shape[0], 1)) pred = sum((np.where((estimator.predict(X) = = classes).T, w, -1 / (n_classes - 1) * w) for estimator, w in zip(self.estimators_, self.estimator_weights_))) pred / = self.estimator_weights_.sum() if n_classes = = 2: pred[:, 0] * = -1 return pred.sum(axis = 1) return pred", - "docstring": "Compute the decision function of `classes_`, respectively.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_weight_boosting.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Assign Call call:_check_X Assign Assign If Compare op:Eq Return return:yes Assign Call call:sum If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_custom_module_class_keys", - "source_code": "def get_custom_module_class_keys(custom_module_mapping: dict[QuantType, dict[type, type]]) -> list[Any]: float_custom_module_classes: set[Any] = set() for quant_mode in [QuantType.STATIC, QuantType.DYNAMIC, QuantType.WEIGHT_ONLY]: quant_mode_custom_module_config = custom_module_mapping.get(quant_mode, {}) quant_mode_custom_module_classes = set(quant_mode_custom_module_config.keys()) float_custom_module_classes | = quant_mode_custom_module_classes return list(float_custom_module_classes)", - "docstring": "Get all the unique custom module keys in the custom config dict e.g. 
Input: { QuantType.STATIC: { CustomModule1: ObservedCustomModule }, QuantType.DYNAMIC: { CustomModule2: DynamicObservedCustomModule }, QuantType.WEIGHT_ONLY: { CustomModule3: WeightOnlyObservedCustomModule }, } Output: # extract the keys across all inner STATIC, DYNAMIC, and WEIGHT_ONLY dicts [CustomModule1, CustomModule2, CustomModule3]", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\utils.py", - "ast_data": "FunctionDef name:get_custom_module_class_keys arguments arg:custom_module_mapping type:dict[QuantType, dict[type, type]] For Assign Call call:get Assign Call call:set Return return:yes" - }, - { - "library": "scikit-learn", - "name": "all_functions", - "source_code": "def all_functions(): from ._testing import ignore_warnings all_functions = [] root = str(Path(__file__).parent.parent) with ignore_warnings(category = FutureWarning): for _, module_name, _ in pkgutil.walk_packages(path = [root], prefix = 'sklearn.'): module_parts = module_name.split('.') if any((part in _MODULE_TO_IGNORE for part in module_parts)) or '._' in module_name: continue module = import_module(module_name) functions = inspect.getmembers(module, _is_checked_function) functions = [(func.__name__, func) for name, func in functions if not name.startswith('_')] all_functions.extend(functions) return sorted(set(all_functions), key = itemgetter(0))", - "docstring": "Get a list of all functions from . Returns ------- functions : list of tuples List of (name, function), where `` is the actual function. Examples -------- >>> from sklearn.utils.discovery import all_functions >>> functions = all_functions() >>> name, function = functions[0] >>> name 'accuracy_score'", - "type": "function", - "file_path": "scikit-learn\\sklearn\\utils\\discovery.py", - "ast_data": "FunctionDef name:all_functions arguments Assign Assign Call call:str With For Call call:walk_packages Assign Call call:split If BoolOp Call call:any Compare op:In Assign Call call:import_module Assign Call call:getmembers Assign Return return:yes" - }, - { - "library": "scipy", - "name": "reset", - "source_code": "def reset(self) -> 'QMCEngine': rng = copy.deepcopy(self.rng_seed) self.rng = check_random_state(rng) self.num_generated = 0 return self", - "docstring": "Reset the engine to base state. Returns ------- engine : QMCEngine Engine reset to its base state.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_qmc.py", - "ast_data": "FunctionDef name:reset arguments arg:self Assign Call call:deepcopy Assign Call call:check_random_state Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "axisinfo", - "source_code": "def axisinfo(self, unit, axis): tz = unit majloc = AutoDateLocator(tz = tz, interval_multiples = self._interval_multiples) majfmt = AutoDateFormatter(majloc, tz = tz) datemin = datetime.date(1970, 1, 1) datemax = datetime.date(1970, 1, 2) return units.AxisInfo(majloc = majloc, majfmt = majfmt, label = '', default_limits = (datemin, datemax))", - "docstring": "Return the for *unit*. *unit* is a instance or None. 
The *axis* argument is required but not used.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\dates.py", - "ast_data": "FunctionDef name:axisinfo arguments arg:self arg:unit arg:axis Assign Assign Call call:AutoDateLocator Assign Call call:AutoDateFormatter Assign Call call:date Assign Call call:date Return return:yes" - }, - { - "library": "tensorflow", - "name": "source_file_list", - "source_code": "def source_file_list(self): return tuple(self._host_name_file_path_to_offset.keys())", - "docstring": "Get a list of source files known to the debugger data reader. Returns: A tuple of tuples.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", - "ast_data": "FunctionDef name:source_file_list arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "toc_add_files", - "source_code": "def toc_add_files(self, refnodes: list[dict[str, Any]]) -> None: refnodes.insert(0, {'level': 1, 'refuri': html.escape(self.config.root_doc + self.out_suffix), 'text': ssp(html.escape(self.env.titles[self.config.root_doc].astext()))}) for file, text in reversed(self.config.epub_pre_files): refnodes.insert(0, {'level': 1, 'refuri': html.escape(file), 'text': ssp(html.escape(text))}) for file, text in self.config.epub_post_files: refnodes.append({'level': 1, 'refuri': html.escape(file), 'text': ssp(html.escape(text))})", - "docstring": "Add the root_doc, pre and post files to a list of refnodes.", - "type": "method", - "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", - "ast_data": "FunctionDef name:toc_add_files arguments arg:self arg:refnodes type:list[dict[str, Any]] For Call call:reversed For" - }, - { - "library": "pytorch", - "name": "ConvTranspose3d", - "source_code": "class ConvTranspose3d(nnq.ConvTranspose3d): _FLOAT_MODULE: ClassVar[type[nn.ConvTranspose3d]] = nn.ConvTranspose3d def __init__(self, in_channels, out_channels, kernel_size, stride = 1, padding = 0, output_padding = 0, groups = 1, bias = True, dilation = 1, padding_mode = 'zeros', device = None, dtype = None): warnings.warn(f'The current implementation of the {self._get_name()} module has poor numerical accuracy and its use is not recommended') factory_kwargs = {'device': device, 'dtype': dtype} super().__init__(in_channels, out_channels, kernel_size, stride, padding, output_padding, groups, bias, dilation, padding_mode, **factory_kwargs) def _get_name(self): return 'DynamicQuantizedConvTranspose3d' def forward(self, input: Tensor, reduce_range: bool = True) -> Tensor: if len(input.shape) ! = 5: raise ValueError('Input shape must be `(N, C, T, H, W)`!') return ops.quantized.conv_transpose3d_dynamic(input, self._packed_params, reduce_range)", - "docstring": "A dynamically quantized transposed convolution module with floating point tensors as inputs and outputs. For details on input arguments, parameters, and implementation see :class:. For special notes, please, see :class: Attributes: weight (Tensor): packed tensor derived from the learnable weight parameter. scale (Tensor): scalar for the output scale zero_point (Tensor): scalar for the output zero point See :class: for other attributes. 
Examples:: >>> # xdoctest: +SKIP >>> # With cubic kernels and equal stride >>> m = nnq.ConvTranspose3d(16, 33, 3, stride=2) >>> # non-cubic kernels and unequal stride and with padding >>> m = nnq.ConvTranspose3d(16, 33, (3, 3, 5), stride=(2, 1, 1), padding=(4, 2, 2)) >>> output = m(input) >>> # exact output size can be also specified as an argument >>> downsample = nnq.Conv3d(16, 16, 3, stride=2, padding=1) >>> upsample = nnq.ConvTranspose3d(16, 16, 3, stride=2, padding=1) >>> h = downsample(input) >>> h.size() torch.Size([1, 16, 6, 6, 6]) >>> output = upsample(h, output_size=input.size()) >>> output.size() torch.Size([1, 16, 12, 12, 12])", - "type": "class", - "file_path": "pytorch\\torch\\ao\\nn\\quantized\\dynamic\\modules\\conv.py", - "ast_data": "ClassDef name:ConvTranspose3d FunctionDef name:__init__ arguments arg:self arg:in_channels arg:out_channels arg:kernel_size arg:stride arg:padding arg:output_padding arg:groups arg:bias arg:dilation arg:padding_mode arg:device arg:dtype Assign FunctionDef name:_get_name arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:input type:Tensor arg:reduce_range type:bool If Compare op:NotEq Raise raises:ValueError('Input shape must be `(N, C, T, H, W)`!') Return return:yes" - }, - { - "library": "django", - "name": "references_model", - "source_code": "def references_model(self, name, app_label): return True", - "docstring": "Return True if there is a chance this operation references the given model name (as a string), with an app label for accuracy. Used for optimization. If in doubt, return True; returning a false positive will merely make the optimizer a little less efficient, while returning a false negative may result in an unusable optimized migration.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\operations\\base.py", - "ast_data": "FunctionDef name:references_model arguments arg:self arg:name arg:app_label Return return:yes" - }, - { - "library": "matplotlib", - "name": "mark_inset", - "source_code": "@_docstring.interpd def mark_inset(parent_axes, inset_axes, loc1, loc2, **kwargs): rect = _TransformedBboxWithCallback(inset_axes.viewLim, parent_axes.transData, callback = parent_axes._unstale_viewLim) kwargs.setdefault('fill', bool({'fc', 'facecolor', 'color'}.intersection(kwargs))) pp = BboxPatch(rect, **kwargs) parent_axes.add_patch(pp) p1 = BboxConnector(inset_axes.bbox, rect, loc1 = loc1, **kwargs) inset_axes.add_patch(p1) p1.set_clip_on(False) p2 = BboxConnector(inset_axes.bbox, rect, loc1 = loc2, **kwargs) inset_axes.add_patch(p2) p2.set_clip_on(False) return (pp, p1, p2)", - "docstring": "Draw a box to mark the location of an area represented by an inset axes. This function draws a box in *parent_axes* at the bounding box of *inset_axes*, and shows a connection with the inset axes by drawing lines at the corners, giving a \"zoomed in\" effect. Parameters ---------- parent_axes : Axes which contains the area of the inset axes. inset_axes : The inset axes. loc1, loc2 : {1, 2, 3, 4} Corners to use for connecting the inset axes and the area in the parent axes. **kwargs Patch properties for the lines and box drawn: %(Patch:kwdoc)s Returns ------- pp : The patch drawn to represent the area of the inset axes. 
p1, p2 : The patches connecting two corners of the inset axes and its area.", - "type": "function", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\inset_locator.py", - "ast_data": "FunctionDef name:mark_inset arguments arg:parent_axes arg:inset_axes arg:loc1 arg:loc2 kwarg:kwargs Assign Call call:_TransformedBboxWithCallback Assign Call call:BboxPatch Assign Call call:BboxConnector Assign Call call:BboxConnector Return return:yes" - }, - { - "library": "cherrypy", - "name": "publish", - "source_code": "def publish(self, channel, *args, **kwargs): if channel not in self.listeners: return [] exc = ChannelFailures() output = [] raw_items = ((self._priorities[channel, listener], listener) for listener in self.listeners[channel]) items = sorted(raw_items, key = operator.itemgetter(0)) for priority, listener in items: try: output.append(listener(*args, **kwargs)) except KeyboardInterrupt: raise except SystemExit: e = sys.exc_info()[1] if exc and e.code = = 0: e.code = 1 raise except Exception: exc.handle_exception() if channel = = 'log': pass else: self.log('Error in %r listener %r' % (channel, listener), level = 40, traceback = True) if exc: raise exc return output", - "docstring": "Return output of all subscribers for the given channel.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\process\\wspbus.py", - "ast_data": "FunctionDef name:publish arguments arg:self arg:channel vararg:args kwarg:kwargs If Compare op:NotIn Return return:yes Assign Call call:ChannelFailures Assign Assign Assign Call call:sorted For Try ExceptHandler Raise ExceptHandler Assign If BoolOp Compare op:Eq Assign Raise ExceptHandler If Compare op:Eq If Raise raises:exc Return return:yes" - }, - { - "library": "kornia", - "name": "predict", - "source_code": "def predict(self, x: Tensor) -> Tensor: KORNIA_CHECK(x.shape[1] = = self.cluster_centers.shape[1], f'Dimensions at position 1 of x and cluster_centers do not match. {x.shape[1]} ! = {self.cluster_centers.shape[1]}') distance = self._pairwise_euclidean_distance(x, self.cluster_centers) cluster_assignment = distance.argmin(-1) return cluster_assignment", - "docstring": "Find the cluster center closest to each point in x. Args: x: 2D tensor Returns: 1D tensor containing cluster id assigned to each data point in x", - "type": "method", - "file_path": "kornia\\kornia\\contrib\\kmeans.py", - "ast_data": "FunctionDef name:predict arguments arg:self arg:x type:Tensor Assign Call call:_pairwise_euclidean_distance Assign Call call:argmin Return return:yes" - }, - { - "library": "django", - "name": "within", - "source_code": "def within(self, other): return capi.geos_within(self.ptr, other.ptr)", - "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is T*F**F***.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:within arguments arg:self arg:other Return return:yes" - }, - { - "library": "matplotlib", - "name": "data", - "source_code": "def data(self, text): self.__data.append(text)", - "docstring": "Add character data to the output stream. 
Parameters ---------- text : str Character data.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_svg.py", - "ast_data": "FunctionDef name:data arguments arg:self arg:text" - }, - { - "library": "sphinx", - "name": "SearchLanguage", - "source_code": "class SearchLanguage: lang: str = '' language_name: str = '' stopwords: Set[str] = frozenset() js_splitter_code: str = '' js_stemmer_rawcode: str = '' js_stemmer_code = '\\n/**\\n * Dummy stemmer for languages without stemming rules.\\n */\\nvar Stemmer = function () {\\n this.stemWord = function (w) {\\n return w;\\n };\\n};\\n' _word_re = re.compile('\\\\w+') def __init__(self, options: dict[str, str]) -> None: self.options = options def split(self, input: str) -> list[str]: return self._word_re.findall(input) def stem(self, word: str) -> str: return word def word_filter(self, word: str) -> bool: return not word.isdigit() and word not in self.stopwords", - "docstring": "This class is the base class for search natural language preprocessors. If you want to add support for a new language, you should override the methods of this class. You should override class property too (e.g. 'en', 'fr' and so on). .. attribute:: stopwords This is a set of stop words of the target language. Default is empty. This word is used for building index and embedded in JS. .. attribute:: js_splitter_code Return splitter function of JavaScript version. The function should be named as `` method. This string is embedded as-is in searchtools.js. This class is used to preprocess search word which Sphinx HTML readers type, before searching index. Default implementation does nothing.", - "type": "class", - "file_path": "sphinx\\sphinx\\search\\__init__.py", - "ast_data": "ClassDef name:SearchLanguage Assign Assign Call call:compile FunctionDef name:__init__ arguments arg:self arg:options type:dict[str, str] Assign FunctionDef name:split arguments arg:self arg:input type:str Return return:yes FunctionDef name:stem arguments arg:self arg:word type:str Return return:yes FunctionDef name:word_filter arguments arg:self arg:word type:str Return return:yes" - }, - { - "library": "matplotlib", - "name": "convert_yunits", - "source_code": "def convert_yunits(self, y): ax = getattr(self, 'axes', None) if ax is None or ax.yaxis is None: return y return ax.yaxis.convert_units(y)", - "docstring": "Convert *y* using the unit type of the yaxis. If the artist is not contained in an Axes or if the yaxis does not have units, *y* itself is returned.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\artist.py", - "ast_data": "FunctionDef name:convert_yunits arguments arg:self arg:y Assign Call call:getattr If BoolOp Compare op:Is Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "prepare_softmax_replacement", - "source_code": "def prepare_softmax_replacement(x, dim): from torch._inductor.inductor_prims import prepare_softmax_online xmax, xsum = prepare_softmax_online(x, dim) xsub = x - xmax return (xmax, xsum, xsub, xsub.exp())", - "docstring": "Return xsub since otherwise log-softmax can not be matched due to a use of this intermediate node. 
Same reason to return xsub.exp() for softmax.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\fx_passes\\post_grad.py", - "ast_data": "FunctionDef name:prepare_softmax_replacement arguments arg:x arg:dim Assign Call call:prepare_softmax_online Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "stat", - "source_code": "@tf_export(v1 = ['gfile.Stat']) def stat(filename): return stat_v2(filename)", - "docstring": "Returns file statistics for a given path. Args: filename: string, path to a file Returns: FileStatistics struct that contains information about the path Raises: errors.OpError: If the operation fails.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:stat arguments arg:filename Call call:tf_export Return return:yes" - }, - { - "library": "tensorflow", - "name": "copy_assets_to_destination_dir", - "source_code": "def copy_assets_to_destination_dir(asset_filename_map, destination_dir, saved_files = None): if saved_files is None: saved_files = set() assets_destination_dir = path_helpers.get_or_create_assets_dir(destination_dir) for asset_basename, asset_source_filepath in asset_filename_map.items(): asset_destination_filepath = file_io.join(compat.as_bytes(assets_destination_dir), compat.as_bytes(asset_basename)) if file_io.file_exists(asset_source_filepath) and asset_source_filepath ! = asset_destination_filepath and (asset_destination_filepath not in saved_files): file_io.copy(asset_source_filepath, asset_destination_filepath, overwrite = True) saved_files.add(asset_destination_filepath) tf_logging.info('Assets written to: %s', compat.as_text(assets_destination_dir))", - "docstring": "Copy all assets from source path to destination path. Args: asset_filename_map: a dict of filenames used for saving the asset in the SavedModel to full paths from which the filenames were derived. destination_dir: the destination directory that assets are stored in. 
saved_files: a set of destination filepaths that have already been copied and will be skipped", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\builder_impl.py", - "ast_data": "FunctionDef name:copy_assets_to_destination_dir arguments arg:asset_filename_map arg:destination_dir arg:saved_files If Compare op:Is Assign Call call:set Assign Call call:get_or_create_assets_dir For Call call:items Assign Call call:join If BoolOp Call call:file_exists Compare op:NotEq Compare op:NotIn" - }, - { - "library": "django", - "name": "from_current_timezone", - "source_code": "def from_current_timezone(value): if settings.USE_TZ and value is not None and timezone.is_naive(value): current_timezone = timezone.get_current_timezone() try: if timezone._datetime_ambiguous_or_imaginary(value, current_timezone): raise ValueError('Ambiguous or non-existent time.') return timezone.make_aware(value, current_timezone) except Exception as exc: raise ValidationError(_('%(datetime)s couldn’t be interpreted in time zone %(current_timezone)s; it may be ambiguous or it may not exist.'), code = 'ambiguous_timezone', params = {'datetime': value, 'current_timezone': current_timezone}) from exc return value", - "docstring": "When time zone support is enabled, convert naive datetimes entered in the current time zone to aware datetimes.", - "type": "function", - "file_path": "django\\django\\forms\\utils.py", - "ast_data": "FunctionDef name:from_current_timezone arguments arg:value If BoolOp Compare op:IsNot Call call:is_naive Assign Call call:get_current_timezone Try If Call call:_datetime_ambiguous_or_imaginary Raise raises:ValueError('Ambiguous or non-existent time.') Return return:yes ExceptHandler Raise raises:ValidationError(_('%(datetime)s couldn’t be interpreted in time zone %(current_timezone)s; it may be ambiguous or it may not exist.'), code='ambiguous_timezone', params={'datetime': value, 'current_timezone': current_timezone}) Return return:yes" - }, - { - "library": "django", - "name": "force_no_ordering", - "source_code": "def force_no_ordering(self): return []", - "docstring": "Return a list used in the \"ORDER BY\" clause to force no ordering at all. Return an empty list to include nothing in the ordering.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:force_no_ordering arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "__eq__", - "source_code": "def __eq__(self, other: object) -> bool: if isinstance(other, str): try: other = self.construct_from_string(other) except TypeError: return False if isinstance(other, type(self)): return all((getattr(self, attr) = = getattr(other, attr) for attr in self._metadata)) return False", - "docstring": "Check whether 'other' is equal to self. By default, 'other' is considered equal if either * it's a string matching 'self.name'. * it's an instance of this type and all of the attributes in `selfother`. 
Parameters ---------- other : Any Returns ------- bool", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\base.py", - "ast_data": "FunctionDef name:__eq__ arguments arg:self arg:other type:object If Call call:isinstance Try Assign Call call:construct_from_string ExceptHandler Return return:yes If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_executorch_backend_config", - "source_code": "def get_executorch_backend_config() -> BackendConfig: return BackendConfig('executorch').set_backend_pattern_configs(_get_linear_configs()).set_backend_pattern_configs(_get_conv_configs()).set_backend_pattern_configs(_get_binary_ops_configs()).set_backend_pattern_configs(_get_share_qparams_ops_configs()).set_backend_pattern_configs(_get_bn_configs()).set_backend_pattern_configs(_get_cat_configs()).set_backend_pattern_configs(_get_embedding_op_configs())", - "docstring": "Return the for backends PyTorch lowers to through the Executorch stack.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\executorch.py", - "ast_data": "FunctionDef name:get_executorch_backend_config arguments Return return:yes" - }, - { - "library": "sphinx", - "name": "write_temporary_file", - "source_code": "def write_temporary_file(content: str) -> str: import tempfile with tempfile.NamedTemporaryFile('w', encoding = 'utf-8', suffix = '.log', prefix = 'sphinx-err-', delete = False) as f: f.write(content) return f.name", - "docstring": "Write content to a temporary file and return the filename.", - "type": "function", - "file_path": "sphinx\\sphinx\\_cli\\util\\errors.py", - "ast_data": "FunctionDef name:write_temporary_file arguments arg:content type:str With Return return:yes" - }, - { - "library": "tensorflow", - "name": "pretty_printed_signature", - "source_code": "def pretty_printed_signature(self, verbose = True): assert self.function_type is not None if verbose: return repr(self.function_type) else: return str(self.function_type)", - "docstring": "Returns a string summarizing the signature of this concrete function.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\polymorphic_function\\concrete_function.py", - "ast_data": "FunctionDef name:pretty_printed_signature arguments arg:self arg:verbose If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "AttributeMutationExisting", - "source_code": "class AttributeMutationExisting(AttributeMutation): def __init__(self): super().__init__(SourceType.Existing)", - "docstring": "This case of VariableTracker.mutation_type marker indicates 1. Dynamo allows mutation on the value's attributes. 2. The value exists before Dynamo tracing started. 
For instance, Dynamo could model a pre-existing object with this marker, indicating that if we encounter mutations to this object, we need to buffer then re-apply those mutations after the graph runs, since the object might be used afterwards in Python.", - "type": "class", - "file_path": "pytorch\\torch\\_dynamo\\variables\\base.py", - "ast_data": "ClassDef name:AttributeMutationExisting FunctionDef name:__init__ arguments arg:self" - }, - { - "library": "tensorflow", - "name": "random_flip_up_down", - "source_code": "@tf_export('image.random_flip_up_down') @dispatch.add_dispatch_support def random_flip_up_down(image, seed = None): random_func = functools.partial(random_ops.random_uniform, seed = seed) return _random_flip(image, 0, random_func, 'random_flip_up_down')", - "docstring": "Randomly flips an image vertically (upside down). With a 1 in 2 chance, outputs the contents of flipped along the first dimension, which is . Otherwise, output the image as-is. When passing a batch of images, each image will be randomly flipped independent of other images. Example usage: >>> image = np.array([[[1], [2]], [[3], [4]]]) >>> tf.image.random_flip_up_down(image, 3).numpy().tolist() [[[3], [4]], [[1], [2]]] Randomly flip multiple images. >>> images = np.array( ... [ ... [[[1], [2]], [[3], [4]]], ... [[[5], [6]], [[7], [8]]] ... ]) >>> tf.image.random_flip_up_down(images, 4).numpy().tolist() [[[[3], [4]], [[1], [2]]], [[[5], [6]], [[7], [8]]]] For producing deterministic results given a value, use . Unlike using the param with ops, ops guarantee the same results given the same seed independent of how many times the function is called, and independent of global seed settings (e.g. tf.random.set_seed). Args: image: 4-D Tensor of shape or 3-D Tensor of shape . seed: A Python integer. Used to create a random seed. See for behavior. Returns: A tensor of the same type and shape as . Raises: ValueError: if the shape of not supported.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\image_ops_impl.py", - "ast_data": "FunctionDef name:random_flip_up_down arguments arg:image arg:seed Call call:tf_export Assign Call call:partial Return return:yes" - }, - { - "library": "pytorch", - "name": "get_graph_provenance_json", - "source_code": "@compatibility(is_backward_compatible = False) def get_graph_provenance_json(graph: Graph) -> dict[str, Any]: provenance_tracking_json = {} for node in graph.nodes: if node.op = = 'call_function': provenance_tracking_json[node.name] = [source.to_dict() for source in node.meta['from_node']] if 'from_node' in node.meta else [] return provenance_tracking_json", - "docstring": "Given an fx.Graph, return a json that contains the provenance information of each node.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\traceback.py", - "ast_data": "FunctionDef name:get_graph_provenance_json arguments arg:graph type:Graph Call call:compatibility Assign For If Compare op:Eq Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, funcs, trackable_obj = None): super(TFLiteConverterV2, self).__init__(funcs, trackable_obj)", - "docstring": "Constructor for TFLiteConverter. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. trackable_obj: tf.AutoTrackable object associated with . A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. 
This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. ).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:funcs arg:trackable_obj" - }, - { - "library": "matplotlib", - "name": "trigger", - "source_code": "def trigger(self, sender, event, data = None): if self._toggled: self.disable(event) else: self.enable(event) self._toggled = not self._toggled", - "docstring": "Calls or based on value.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_tools.py", - "ast_data": "FunctionDef name:trigger arguments arg:self arg:sender arg:event arg:data If Assign" - }, - { - "library": "pygame", - "name": "get_italic", - "source_code": "def get_italic(self): return self.oblique", - "docstring": "get_italic() -> bool check if the text will be rendered italic", - "type": "method", - "file_path": "pygame\\src_py\\ftfont.py", - "ast_data": "FunctionDef name:get_italic arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "softplus", - "source_code": "@dispatch.add_dispatch_support def softplus(x): return math_ops.softplus(x)", - "docstring": "Softplus activation function, . Example Usage: >>> a = tf.constant([-20, -1.0, 0.0, 1.0, 20], dtype = tf.float32) >>> b = tf.keras.activations.softplus(a) >>> b.numpy() array([2.0611537e-09, 3.1326166e-01, 6.9314718e-01, 1.3132616e+00, 2.0000000e+01], dtype=float32) Args: x: Input tensor. Returns: The softplus activation: .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\activations.py", - "ast_data": "FunctionDef name:softplus arguments arg:x Return return:yes" - }, - { - "library": "matplotlib", - "name": "juggle_axes", - "source_code": "def juggle_axes(xs, ys, zs, zdir): if zdir = = 'x': return (zs, xs, ys) elif zdir = = 'y': return (xs, zs, ys) elif zdir[0] = = '-': return rotate_axes(xs, ys, zs, zdir) else: return (xs, ys, zs)", - "docstring": "Reorder coordinates so that 2D *xs*, *ys* can be plotted in the plane orthogonal to *zdir*. *zdir* is normally 'x', 'y' or 'z'. However, if *zdir* starts with a '-' it is interpreted as a compensation for .", - "type": "function", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", - "ast_data": "FunctionDef name:juggle_axes arguments arg:xs arg:ys arg:zs arg:zdir If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "name", - "source_code": "@property def name(self) -> Hashable: return self._name", - "docstring": "Return the name of the Series. The name of a Series becomes its index or column name if it is used to form a DataFrame. It is also used whenever displaying the Series using the interpreter. Returns ------- label (hashable object) The name of the Series, also the column name if part of a DataFrame. See Also -------- Series.rename : Sets the Series name when given a scalar input. Index.name : Corresponding Index property. Examples -------- The Series name can be set initially when calling the constructor. >>> s = pd.Series([1, 2, 3], dtype=np.int64, name=\"Numbers\") >>> s 0 1 1 2 2 3 Name: Numbers, dtype: int64 >>> s.name = \"Integers\" >>> s 0 1 1 2 2 3 Name: Integers, dtype: int64 The name of a Series within a DataFrame is its column name. >>> df = pd.DataFrame( ... [[1, 2], [3, 4], [5, 6]], columns=[\"Odd Numbers\", \"Even Numbers\"] ... 
) >>> df Odd Numbers Even Numbers 0 1 2 1 3 4 2 5 6 >>> df[\"Even Numbers\"].name 'Even Numbers'", - "type": "method", - "file_path": "pandas\\pandas\\core\\series.py", - "ast_data": "FunctionDef name:name arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "maybe_increase_iter", - "source_code": "def maybe_increase_iter(self, bucket): if bucket.is_last(): self.iter + = 1 if self.iter = = self.start_powerSGD_iter: logger.info('Start to apply PowerSGD after %s iterations.', self.iter)", - "docstring": "Track iterations and trigger log message at start of local SGD.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\powerSGD_hook.py", - "ast_data": "FunctionDef name:maybe_increase_iter arguments arg:self arg:bucket If Call call:is_last If Compare op:Eq" - }, - { - "library": "pytorch", - "name": "get_input_node_symbols", - "source_code": "def get_input_node_symbols(node: Union[ir.IRNode, sympy.Expr, ir.TorchBindObject]) -> OrderedSet[sympy.Symbol]: if isinstance(node, ir.TorchBindObject): return OrderedSet() elif isinstance(node, ir.IRNode): return get_layout_symints(node) else: raise NotImplementedError(f'Unsupported input node type: {type(node)}')", - "docstring": "Gets symbols used in input node shapes, strides, and offsets.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\scheduler.py", - "ast_data": "FunctionDef name:get_input_node_symbols arguments arg:node type:Union[ir.IRNode, sympy.Expr, ir.TorchBindObject] If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes Raise raises:NotImplementedError(f'Unsupported input node type: {type(node)}')" - }, - { - "library": "pytorch", - "name": "dispatch_torch_function", - "source_code": "def dispatch_torch_function(tx: 'InstructionTranslator', fn, args, kwargs): all_args = _get_all_args(args, kwargs) overloaded_args = _get_overloaded_args([arg for arg in all_args if has_torch_function(arg)], _get_subclass_type) types = TupleVariable([_get_subclass_type_var(tx, arg) for arg in overloaded_args]) if tx.symbolic_torch_function_state.in_torch_function_mode(): res = tx.symbolic_torch_function_state.call_torch_function_mode(tx, fn, types, args, kwargs) if not (isinstance(res, ConstantVariable) and res.value is NotImplemented): return res for arg in overloaded_args: res = arg.call_torch_function(tx, fn, types, args, kwargs) if not (isinstance(res, ConstantVariable) and res.value is NotImplemented): return res unimplemented_v2(gb_type = 'TypeError from user code', context = f'fn = {fn!r}, args = {args!r}, kwargs = {kwargs!r}', explanation = f'All __torch_function__ overrides for for function {fn} returned NotImplemented', hints = [*graph_break_hints.USER_ERROR])", - "docstring": "Gathers all args that are TensorWithTFOverrideVariable and dispatches based on the ordering in _get_overloaded_args", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\variables\\torch_function.py", - "ast_data": "FunctionDef name:dispatch_torch_function arguments arg:tx type:'InstructionTranslator' arg:fn arg:args arg:kwargs Assign Call call:_get_all_args Assign Call call:_get_overloaded_args Assign Call call:TupleVariable If Call call:in_torch_function_mode Assign Call call:call_torch_function_mode If Return return:yes For Assign Call call:call_torch_function If Return return:yes" - }, - { - "library": "tensorflow", - "name": "from_config", - "source_code": "@classmethod def from_config(cls, config, custom_objects = None): if 
'initial_accumulator_value' not in config: config['initial_accumulator_value'] = 0.1 if 'lr' in config: config['learning_rate'] = config.pop('lr') return cls(**config)", - "docstring": "Creates an optimizer from its config. This method is the reverse of , capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. custom_objects: A Python dictionary mapping names to additional Python objects used to create this optimizer, such as a function used for a hyperparameter. Returns: An optimizer instance.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\optimizer_v2\\adagrad.py", - "ast_data": "FunctionDef name:from_config arguments arg:cls arg:config arg:custom_objects If Compare op:NotIn Assign If Compare op:In Assign Call call:pop Return return:yes" - }, - { - "library": "pytorch", - "name": "is_lowp_fp_source", - "source_code": "def is_lowp_fp_source(node: torch.fx.Node, dt: torch.dtype): assert dt in DTYPE_LOWP_FP return get_output_dtype(node) = = dt", - "docstring": "Check if the given node produces output with expected low precision floating point data type.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp.py", - "ast_data": "FunctionDef name:is_lowp_fp_source arguments arg:node type:torch.fx.Node arg:dt type:torch.dtype Return return:yes" - }, - { - "library": "pytorch", - "name": "host_memory_stats_as_nested_dict", - "source_code": "def host_memory_stats_as_nested_dict() -> dict[str, Any]: if not is_initialized(): return {} return torch._C._cuda_hostMemoryStats()", - "docstring": "Return the result of :func: as a nested dictionary.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\memory.py", - "ast_data": "FunctionDef name:host_memory_stats_as_nested_dict arguments If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "to_tensors", - "source_code": "@doc_controls.do_not_doc_inheritable def to_tensors(self, value): tensors = [] nest.map_structure(lambda spec, v: tensors.extend(spec.to_tensors(v)), self._component_specs, self._to_components(value)) return tensors", - "docstring": "See TraceType base class for details. Do not override.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\type_spec.py", - "ast_data": "FunctionDef name:to_tensors arguments arg:self arg:value Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "filecmp", - "source_code": "def filecmp(filename_a, filename_b): size_a = FileIO(filename_a, 'rb').size() size_b = FileIO(filename_b, 'rb').size() if size_a ! = size_b: return False crc_a = file_crc32(filename_a) crc_b = file_crc32(filename_b) return crc_a = = crc_b", - "docstring": "Compare two files, returning True if they are the same, False otherwise. We check size first and return False quickly if the files are different sizes. If they are the same size, we continue to generating a crc for the whole file. You might wonder: why not use Python's instead? The answer is that the builtin library is not robust to the many different filesystems TensorFlow runs on, and so we here perform a similar comparison with the more robust FileIO. Args: filename_a: string path to the first file. filename_b: string path to the second file. 
Returns: True if the files are the same, False otherwise.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:filecmp arguments arg:filename_a arg:filename_b Assign Call call:size Assign Call call:size If Compare op:NotEq Return return:yes Assign Call call:file_crc32 Assign Call call:file_crc32 Return return:yes" - }, - { - "library": "seaborn", - "name": "calculate_dendrogram", - "source_code": "def calculate_dendrogram(self): return hierarchy.dendrogram(self.linkage, no_plot = True, color_threshold = -np.inf)", - "docstring": "Calculates a dendrogram based on the linkage matrix Made a separate function, not a property because don't want to recalculate the dendrogram every time it is accessed. Returns ------- dendrogram : dict Dendrogram dictionary as returned by scipy.cluster.hierarchy .dendrogram. The important key-value pairing is \"reordered_ind\" which indicates the re-ordering of the matrix", - "type": "method", - "file_path": "seaborn\\seaborn\\matrix.py", - "ast_data": "FunctionDef name:calculate_dendrogram arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "get_disallowed_checksums", - "source_code": "def get_disallowed_checksums(binary: str) -> set[str]: proc = subprocess.run([binary, 'query', 'kind(http_archive, //external: *)', '--output = xml'], capture_output = True, check = True, text = True) root = ET.fromstring(proc.stdout) disallowed_checksums = set() for rule in root.findall('.//rule[@class = \"http_archive\"]'): urls_node = rule.find('.//list[@name = \"urls\"]') if urls_node is None: continue urls = [n.get('value') for n in urls_node.findall('.//string')] checksum_node = rule.find('.//string[@name = \"sha256\"]') if checksum_node is None: continue checksum = checksum_node.get('value') if not checksum: continue if not is_required_checksum(urls): disallowed_checksums.add(checksum) return disallowed_checksums", - "docstring": "Return the set of disallowed checksums from all http_archive rules", - "type": "function", - "file_path": "pytorch\\tools\\linter\\adapters\\bazel_linter.py", - "ast_data": "FunctionDef name:get_disallowed_checksums arguments arg:binary type:str Assign Call call:run Assign Call call:fromstring Assign Call call:set For Call call:findall Assign Call call:find If Compare op:Is Assign Assign Call call:find If Compare op:Is Assign Call call:get If If Return return:yes" - }, - { - "library": "tensorflow", - "name": "isgenerator", - "source_code": "def isgenerator(object): return _inspect.isgenerator(tf_decorator.unwrap(object)[1])", - "docstring": "TFDecorator-aware replacement for inspect.isgenerator.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_inspect.py", - "ast_data": "FunctionDef name:isgenerator arguments arg:object Return return:yes" - }, - { - "library": "pytorch", - "name": "deserialize", - "source_code": "def deserialize(self, binary_data, tensor_table): global _thread_local_tensor_tables if hasattr(_thread_local_tensor_tables, 'recv_tables'): old_recv_tables = _thread_local_tensor_tables.recv_tables else: old_recv_tables = None _thread_local_tensor_tables.recv_tables = tensor_table try: unpickler = _unpickler(io.BytesIO(binary_data)) ret = unpickler.load() except AttributeError as e: except_str = str(e) + ' Default RPC pickler does not serialize\\n function code. Ensure that UDFs are defined on both caller and\\n callee modules.' 
ret = AttributeError(except_str) ret.__cause__ = e if old_recv_tables is not None: _thread_local_tensor_tables.recv_tables = old_recv_tables else: del _thread_local_tensor_tables.recv_tables return ret", - "docstring": "Deserialize binary string + tensor table to original obj", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\rpc\\internal.py", - "ast_data": "FunctionDef name:deserialize arguments arg:self arg:binary_data arg:tensor_table If Call call:hasattr Assign Assign Assign Try Assign Call call:_unpickler Assign Call call:load ExceptHandler Assign Assign Call call:AttributeError Assign If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "version", - "source_code": "def version(): ver = torch._C._nccl_version() major = ver >> 32 minor = ver >> 16 & 65535 patch = ver & 65535 suffix = torch._C._nccl_version_suffix().decode('utf-8') if suffix = = '': return (major, minor, patch) else: return (major, minor, patch, suffix)", - "docstring": "Returns the version of the NCCL. This function returns a tuple containing the major, minor, and patch version numbers of the NCCL. The suffix is also included in the tuple if a version suffix exists. Returns: tuple: The version information of the NCCL.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\nccl.py", - "ast_data": "FunctionDef name:version arguments Assign Call call:_nccl_version Assign Assign Assign Assign Call call:decode If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "update_catalogs", - "source_code": "def update_catalogs(resources = None, languages = None, verbosity = 0): settings.configure() django.setup() if resources is not None: print('`update_catalogs` will always process all resources.') contrib_dirs = _get_locale_dirs(None, include_core = False) os.chdir(os.path.join(os.getcwd(), 'django')) print('Updating en catalogs for Django and contrib apps...') call_command('makemessages', locale = ['en'], verbosity = verbosity) print('Updating en JS catalogs for Django and contrib apps...') call_command('makemessages', locale = ['en'], domain = 'djangojs', verbosity = verbosity) _check_diff('core', os.path.join(os.getcwd(), 'conf', 'locale')) for name, dir_ in contrib_dirs: _check_diff(name, dir_)", - "docstring": "Update the en/LC_MESSAGES/django.po (main and contrib) files with new/updated translatable strings.", - "type": "function", - "file_path": "django\\scripts\\manage_translations.py", - "ast_data": "FunctionDef name:update_catalogs arguments arg:resources arg:languages arg:verbosity If Compare op:IsNot Assign Call call:_get_locale_dirs For" - }, - { - "library": "django", - "name": "accepts", - "source_code": "def accepts(self, media_type): return self.accepted_type(media_type) is not None", - "docstring": "Does the client accept a response in the given media type?", - "type": "method", - "file_path": "django\\django\\http\\request.py", - "ast_data": "FunctionDef name:accepts arguments arg:self arg:media_type Return return:yes" - }, - { - "library": "tensorflow", - "name": "cast_if_floating_dtype_and_mismatch", - "source_code": "def cast_if_floating_dtype_and_mismatch(targets, outputs): if tensor_util.is_tf_type(targets): return cast_single_tensor(targets, dtype = outputs[0].dtype) new_targets = [] for target, out in zip(targets, outputs): if isinstance(target, np.ndarray): target = tensor_conversion.convert_to_tensor_v2_with_dispatch(target) if target.dtype ! 
= out.dtype: new_targets.append(cast_single_tensor(target, dtype = out.dtype)) else: new_targets.append(target) return new_targets", - "docstring": "Returns target data tensors using correct datatype. Checks that each target and output pair are the same datatype. If not, casts the target to the output's datatype. Args: targets: tensor or list of targets. outputs: tensor or list of outputs. Returns: Targets in appropriate datatype.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", - "ast_data": "FunctionDef name:cast_if_floating_dtype_and_mismatch arguments arg:targets arg:outputs If Call call:is_tf_type Return return:yes Assign For Call call:zip If Call call:isinstance Assign Call call:convert_to_tensor_v2_with_dispatch If Compare op:NotEq Return return:yes" - }, - { - "library": "tensorflow", - "name": "can_handle", - "source_code": "@staticmethod def can_handle(x, y = None): raise NotImplementedError", - "docstring": "Whether the current DataAdapter could handle the input x and y. Structure wise, x and y can be single object, or list of objects if there multiple input/output, or dictionary of objects when the intput/output are named. Args: x: input features. y: target labels. Note that y could be None in the case of prediction. Returns: boolean", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", - "ast_data": "FunctionDef name:can_handle arguments arg:x arg:y Raise raises:NotImplementedError" - }, - { - "library": "pytorch", - "name": "selu", - "source_code": "def selu(input: Tensor, inplace: bool = False) -> Tensor: if has_torch_function_unary(input): return handle_torch_function(selu, (input,), input, inplace = inplace) if inplace: result = torch.selu_(input) else: result = torch.selu(input) return result", - "docstring": "selu(input, inplace=False) -> Tensor Applies element-wise, :math:, with :math: and :math:. See :class: for more details.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:selu arguments arg:input type:Tensor arg:inplace type:bool If Call call:has_torch_function_unary Return return:yes If Assign Call call:selu_ Assign Call call:selu Return return:yes" - }, - { - "library": "numpy", - "name": "traverse", - "source_code": "def traverse(obj, visit, parents = [], result = None, *args, **kwargs): if _is_visit_pair(obj): if obj[0] = = 'parent_block': return obj new_result = visit(obj, parents, result, *args, **kwargs) if new_result is not None: assert _is_visit_pair(new_result) return new_result parent = obj result_key, obj = obj else: parent = (None, obj) result_key = None if isinstance(obj, list): new_result = [] for index, value in enumerate(obj): new_index, new_item = traverse((index, value), visit, parents + [parent], result, *args, **kwargs) if new_index is not None: new_result.append(new_item) elif isinstance(obj, dict): new_result = {} for key, value in obj.items(): new_key, new_value = traverse((key, value), visit, parents + [parent], result, *args, **kwargs) if new_key is not None: new_result[new_key] = new_value else: new_result = obj if result_key is None: return new_result return (result_key, new_result)", - "docstring": "Traverse f2py data structure with the following visit function: def visit(item, parents, result, *args, **kwargs): \"\"\" parents is a list of key-\"f2py data structure\" pairs from which items are taken from. 
result is a f2py data structure that is filled with the return value of the visit function. item is 2-tuple (index, value) if parents[-1][1] is a list item is 2-tuple (key, value) if parents[-1][1] is a dict The return value of visit must be None, or of the same kind as item, that is, if parents[-1] is a list, the return value must be 2-tuple (new_index, new_value), or if parents[-1] is a dict, the return value must be 2-tuple (new_key, new_value). If new_index or new_value is None, the return value of visit is ignored, that is, it will not be added to the result. If the return value is None, the content of obj will be traversed, otherwise not. \"\"\"", - "type": "function", - "file_path": "numpy\\numpy\\f2py\\crackfortran.py", - "ast_data": "FunctionDef name:traverse arguments arg:obj arg:visit arg:parents arg:result vararg:args kwarg:kwargs If Call call:_is_visit_pair If Compare op:Eq Return return:yes Assign Call call:visit If Compare op:IsNot Return return:yes Assign Assign Assign Assign If Call call:isinstance Assign For Call call:enumerate Assign Call call:traverse If Compare op:IsNot If Call call:isinstance Assign For Call call:items Assign Call call:traverse If Compare op:IsNot Assign Assign If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_antialiased", - "source_code": "def set_antialiased(self, b): if self._antialiased ! = b: self.stale = True self._antialiased = b", - "docstring": "Set whether to use antialiased rendering. Parameters ---------- b : bool", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\lines.py", - "ast_data": "FunctionDef name:set_antialiased arguments arg:self arg:b If Compare op:NotEq Assign Assign" - }, - { - "library": "tensorflow", - "name": "smart_case", - "source_code": "def smart_case(pred_fn_pairs, default = None, exclusive = False, name = 'smart_case'): return control_flow_case._case_helper(smart_cond, pred_fn_pairs, default, exclusive, name, allow_python_preds = True)", - "docstring": "Like tf.case, except attempts to statically evaluate predicates. If any predicate in is a bool or has a constant value, the associated callable will be called or omitted depending on its value. Otherwise this functions like tf.case. Args: pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a callable which returns a list of tensors. default: Optional callable that returns a list of tensors. exclusive: True iff at most one predicate is allowed to evaluate to . name: A name for this operation (optional). Returns: The tensors returned by the first pair whose predicate evaluated to True, or those returned by if none does. Raises: TypeError: If is not a list/dictionary. TypeError: If is a list but does not contain 2-tuples. TypeError: If is not callable for any i, or is not callable.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\smart_cond.py", - "ast_data": "FunctionDef name:smart_case arguments arg:pred_fn_pairs arg:default arg:exclusive arg:name Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_identity_broadcaster", - "source_code": "@classmethod def get_identity_broadcaster(cls, nvals, dtype = None): return _GatherLayerBroadcaster(math_ops.range(nvals, dtype = dtype))", - "docstring": "Create an identity broadcaster. TODO(martinz): an identity broadcaster can be far more efficient than a generic broadcaster. Add an optimized implementation. Args: nvals: the number of values for the broadcaster. 
dtype: the dtype of the broadcaster, or None to use the dtype of nvals. Returns: an identity broadcaster from [0....nvals-1] to [0...nvals-1]", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\dynamic_ragged_shape.py", - "ast_data": "FunctionDef name:get_identity_broadcaster arguments arg:cls arg:nvals arg:dtype Return return:yes" - }, - { - "library": "seaborn", - "name": "show", - "source_code": "def show(self, **kwargs) -> None: import matplotlib.pyplot as plt with theme_context(self._theme): plt.show(**kwargs)", - "docstring": "Display the plot by hooking into pyplot. This method calls :func: with any keyword parameters.", - "type": "method", - "file_path": "seaborn\\seaborn\\_core\\plot.py", - "ast_data": "FunctionDef name:show arguments arg:self kwarg:kwargs With" - }, - { - "library": "kornia", - "name": "ManyToManyAugmentationDispather", - "source_code": "class ManyToManyAugmentationDispather(nn.Module): def __init__(self, *augmentations: AugmentationSequential) -> None: super().__init__() self._check_consistency(*augmentations) self.augmentations = augmentations def _check_consistency(self, *augmentations: AugmentationSequential) -> bool: for i, aug in enumerate(augmentations): if not isinstance(aug, AugmentationSequential): raise ValueError(f'Please wrap your augmentations[`{i}`] with `AugmentationSequentials`.') return True def forward(self, *input: Union[List[Tensor], List[Tuple[Tensor]]]) -> Union[List[Tensor], List[Tuple[Tensor]]]: return [aug(*inp) for inp, aug in zip(input, self.augmentations)]", - "docstring": "Dispatches different augmentations to different inputs element-wisely. Args: augmentations: a list or a sequence of kornia AugmentationSequential modules. Examples: >>> import torch >>> input_1, input_2 = torch.randn(2, 3, 5, 6), torch.randn(2, 3, 5, 6) >>> mask_1, mask_2 = torch.ones(2, 3, 5, 6), torch.ones(2, 3, 5, 6) >>> aug_list = ManyToManyAugmentationDispather( ... AugmentationSequential( ... kornia.augmentation.ColorJiggle(0.1, 0.1, 0.1, 0.1, p=1.0), ... kornia.augmentation.RandomAffine(360, p=1.0), ... data_keys=[\"input\", \"mask\",], ... ), ... AugmentationSequential( ... kornia.augmentation.ColorJiggle(0.1, 0.1, 0.1, 0.1, p=1.0), ... kornia.augmentation.RandomAffine(360, p=1.0), ... data_keys=[\"input\", \"mask\",], ... ) ... ) >>> output = aug_list((input_1, mask_1), (input_2, mask_2))", - "type": "class", - "file_path": "kornia\\kornia\\augmentation\\container\\dispatcher.py", - "ast_data": "ClassDef name:ManyToManyAugmentationDispather FunctionDef name:__init__ arguments arg:self vararg:augmentations Assign FunctionDef name:_check_consistency arguments arg:self vararg:augmentations For Call call:enumerate If Raise raises:ValueError(f'Please wrap your augmentations[`{i}`] with `AugmentationSequentials`.') Return return:yes FunctionDef name:forward arguments arg:self vararg:input Return return:yes" - }, - { - "library": "django", - "name": "equals", - "source_code": "def equals(self, other): return capi.geos_equals(self.ptr, other.ptr)", - "docstring": "Return true if the DE-9IM intersection matrix for the two Geometries is T*F**FFF*.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:equals arguments arg:self arg:other Return return:yes" - }, - { - "library": "matplotlib", - "name": "transform_point", - "source_code": "def transform_point(self, point): if len(point) ! 
= self.input_dims: raise ValueError(\"The length of 'point' must be 'self.input_dims'\") return self.transform(point)", - "docstring": "Return a transformed point. This function is only kept for backcompatibility; the more general method is capable of transforming both a list of points and a single point. The point is given as a sequence of length :attr:. The transformed point is returned as a sequence of length :attr:.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\transforms.py", - "ast_data": "FunctionDef name:transform_point arguments arg:self arg:point If Compare op:NotEq Raise raises:ValueError(\"The length of 'point' must be 'self.input_dims'\") Return return:yes" - }, - { - "library": "numpy", - "name": "get_api_functions", - "source_code": "def get_api_functions(tagname, api_dict): functions = [] for f in API_FILES: functions.extend(find_functions(f, tagname)) dfunctions = [(api_dict[func.name][0], func) for func in functions] dfunctions.sort() return [a[1] for a in dfunctions]", - "docstring": "Parse source files to get functions tagged by the given tag.", - "type": "function", - "file_path": "numpy\\numpy\\_core\\code_generators\\genapi.py", - "ast_data": "FunctionDef name:get_api_functions arguments arg:tagname arg:api_dict Assign For Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, num_groups = 2): if num_groups < 1: raise ValueError(f'Argument `num_groups` must be a positive integer. Received: num_groups = {num_groups}') self._ready = threading.Condition(threading.Lock()) self._num_groups = num_groups self._group_member_counts = [0] * self._num_groups", - "docstring": "Initialize a group lock. Args: num_groups: The number of groups that will be accessing the resource under consideration. Should be a positive number. Returns: A group lock that can then be used to synchronize code. Raises: ValueError: If num_groups is less than 1.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\util\\lock_util.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:num_groups If Compare op:Lt Raise raises:ValueError(f'Argument `num_groups` must be a positive integer. Received: num_groups={num_groups}') Assign Call call:Condition Assign Assign" - }, - { - "library": "pytorch", - "name": "QuantWrapper", - "source_code": "class QuantWrapper(nn.Module): quant: QuantStub dequant: DeQuantStub module: nn.Module def __init__(self, module): super().__init__() qconfig = getattr(module, 'qconfig', None) self.add_module('quant', QuantStub(qconfig)) self.add_module('dequant', DeQuantStub(qconfig)) self.add_module('module', module) self.train(module.training) def forward(self, X): X = self.quant(X) X = self.module(X) return self.dequant(X)", - "docstring": "A wrapper class that wraps the input module, adds QuantStub and DeQuantStub and surround the call to module with call to quant and dequant modules. This is used by the utility functions to add the quant and dequant modules, before function will just be observer, it observes the input tensor, after , will be swapped to which does actual quantization. 
Similarly for .", - "type": "class", - "file_path": "pytorch\\torch\\ao\\quantization\\stubs.py", - "ast_data": "ClassDef name:QuantWrapper FunctionDef name:__init__ arguments arg:self arg:module Assign Call call:getattr FunctionDef name:forward arguments arg:self arg:X Assign Call call:quant Assign Call call:module Return return:yes" - }, - { - "library": "pytorch", - "name": "write", - "source_code": "@classmethod def write(cls, source_code: str, dst_file_ext: str) -> tuple[str, str]: if config.cuda.cutlass_hash_with_compile_cmd: cuda_command = repr(cuda_compile_command(['dummy_input'], 'dummy_output', dst_file_ext)) extra = cuda_command else: extra = repr([_cuda_compiler(), _nvcc_compiler_options(), _nvcc_host_compiler_options(), cutlass_key()] + [dst_file_ext] if dst_file_ext = = 'o' else []) key, input_path = write(source_code, cls._SOURCE_CODE_SUFFIX, extra = extra) return (key, input_path)", - "docstring": "Writes source code into a file with dst_file_ext as the file extension. Returns the hash key of source code, and the path to the file.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codecache.py", - "ast_data": "FunctionDef name:write arguments arg:cls arg:source_code type:str arg:dst_file_ext type:str If Assign Call call:repr Assign Assign Call call:repr Assign Call call:write Return return:yes" - }, - { - "library": "django", - "name": "safe_join", - "source_code": "def safe_join(base, *paths): final_path = abspath(join(base, *paths)) base_path = abspath(base) if not normcase(final_path).startswith(normcase(base_path + sep)) and normcase(final_path) ! = normcase(base_path) and (dirname(normcase(base_path)) ! = normcase(base_path)): raise SuspiciousFileOperation('The joined path ({}) is located outside of the base path component ({})'.format(final_path, base_path)) return final_path", - "docstring": "Join one or more path components to the base path component intelligently. Return a normalized, absolute version of the final path. 
Raise SuspiciousFileOperation if the final path isn't located inside of the base path component.", - "type": "function", - "file_path": "django\\django\\utils\\_os.py", - "ast_data": "FunctionDef name:safe_join arguments arg:base vararg:paths Assign Call call:abspath Assign Call call:abspath If BoolOp Compare op:NotEq Compare op:NotEq Raise raises:SuspiciousFileOperation('The joined path ({}) is located outside of the base path component ({})'.format(final_path, base_path)) Return return:yes" - }, - { - "library": "tensorflow", - "name": "compute_weighted_loss", - "source_code": "def compute_weighted_loss(losses, sample_weight = None, reduction = ReductionV2.SUM_OVER_BATCH_SIZE, name = None): ReductionV2.validate(reduction) if reduction = = ReductionV2.AUTO: reduction = ReductionV2.SUM_OVER_BATCH_SIZE if sample_weight is None: sample_weight = 1.0 with backend.name_scope(name or 'weighted_loss'): ops.get_default_graph()._last_loss_reduction = reduction if not isinstance(losses, (keras_tensor.KerasTensor, ragged_tensor.RaggedTensor)): losses = tensor_conversion.convert_to_tensor_v2_with_dispatch(losses) input_dtype = losses.dtype if not isinstance(sample_weight, keras_tensor.KerasTensor): sample_weight = tensor_conversion.convert_to_tensor_v2_with_dispatch(sample_weight) losses = math_ops.cast(losses, 'float32') sample_weight = math_ops.cast(sample_weight, 'float32') losses, _, sample_weight = squeeze_or_expand_dimensions(losses, None, sample_weight) weighted_losses = math_ops.multiply(losses, sample_weight) loss = reduce_weighted_loss(weighted_losses, reduction) loss = math_ops.cast(loss, input_dtype) return loss", - "docstring": "Computes the weighted loss. Args: losses: of shape . sample_weight: Optional whose rank is either 0, or the same rank as , or be broadcastable to . reduction: (Optional) Type of to apply to loss. Default value is . name: Optional name for the op. Raises: ValueError: If the shape of is not compatible with . Returns: Weighted loss of the same type as . If is , this has the same shape as ; otherwise, it is scalar.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py", - "ast_data": "FunctionDef name:compute_weighted_loss arguments arg:losses arg:sample_weight arg:reduction arg:name If Compare op:Eq Assign If Compare op:Is Assign With Assign If Assign Call call:convert_to_tensor_v2_with_dispatch Assign If Assign Call call:convert_to_tensor_v2_with_dispatch Assign Call call:cast Assign Call call:cast Assign Call call:squeeze_or_expand_dimensions Assign Call call:multiply Assign Call call:reduce_weighted_loss Assign Call call:cast Return return:yes" - }, - { - "library": "numpy", - "name": "HermiteE", - "source_code": "class HermiteE(ABCPolyBase): _add = staticmethod(hermeadd) _sub = staticmethod(hermesub) _mul = staticmethod(hermemul) _div = staticmethod(hermediv) _pow = staticmethod(hermepow) _val = staticmethod(hermeval) _int = staticmethod(hermeint) _der = staticmethod(hermeder) _fit = staticmethod(hermefit) _line = staticmethod(hermeline) _roots = staticmethod(hermeroots) _fromroots = staticmethod(hermefromroots) domain = np.array(hermedomain) window = np.array(hermedomain) basis_name = 'He'", - "docstring": "An HermiteE series class. The HermiteE class provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the attributes and methods listed below. 
Parameters ---------- coef : array_like HermiteE coefficients in order of increasing degree, i.e, `domain` for its use. The default value is [-1., 1.]. symbol : str, optional Symbol used to represent the independent variable in string representations of the polynomial expression, e.g. for printing. The symbol must be a valid Python identifier. Default value is 'x'. .. versionadded:: 1.24", - "type": "class", - "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", - "ast_data": "ClassDef name:HermiteE Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:staticmethod Assign Call call:array Assign Call call:array Assign" - }, - { - "library": "feincms", - "name": "tag_model", - "source_code": "def tag_model(cls, admin_cls = None, field_name = 'tags', sort_tags = False, select_field = False, auto_add_admin_field = True, admin_list_display = True): cls.add_to_class(field_name, (TagSelectField if select_field else TagField)(field_name.capitalize(), blank = True)) try: tagging_register(cls, tag_descriptor_attr = 'tagging_' + field_name) except AlreadyRegistered: return if admin_cls: if admin_list_display: admin_cls.list_display.append(field_name) admin_cls.list_filter.append(field_name) if auto_add_admin_field and hasattr(admin_cls, 'add_extension_options'): admin_cls.add_extension_options(_('Tagging'), {'fields': (field_name,)}) if sort_tags: pre_save.connect(pre_save_handler, sender = cls)", - "docstring": "tag_model accepts a number of named parameters: admin_cls If set to a subclass of ModelAdmin, will insert the tag field into the list_display and list_filter fields. field_name Defaults to \"tags\", can be used to name your tag field differently. sort_tags Boolean, defaults to False. If set to True, a pre_save handler will be inserted to sort the tag field alphabetically. This is useful in case you want a canonical representation for a tag collection, as when yo're presenting a list of tag combinations (e.g. in an admin filter list). select_field If True, show a multi select instead of the standard CharField for tag entry. 
auto_add_admin_field If True, attempts to add the tag field to the admin class.", - "type": "function", - "file_path": "feincms\\feincms\\contrib\\tagging.py", - "ast_data": "FunctionDef name:tag_model arguments arg:cls arg:admin_cls arg:field_name arg:sort_tags arg:select_field arg:auto_add_admin_field arg:admin_list_display Try ExceptHandler Return return:no If If If BoolOp Call call:hasattr If" - }, - { - "library": "pytorch", - "name": "unify", - "source_code": "@dispatch(object, object, dict) def unify(u, v, s): u = walk(u, s) v = walk(v, s) if u = = v: return s if isvar(u): return assoc(s, u, v) if isvar(v): return assoc(s, v, u) return _unify(u, v, s)", - "docstring": "Find substitution so that u == v while satisfying s >>> x = var(\"x\") >>> unify((1, x), (1, 2), {}) {~x: 2}", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\core.py", - "ast_data": "FunctionDef name:unify arguments arg:u arg:v arg:s Call call:dispatch Assign Call call:walk Assign Call call:walk If Compare op:Eq Return return:yes If Call call:isvar Return return:yes If Call call:isvar Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "report_proto_path", - "source_code": "def report_proto_path(self): return self._report_proto_path", - "docstring": "Getter for path where tensor_tracer.proto object should be written. Returns: A string path.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", - "ast_data": "FunctionDef name:report_proto_path arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "shutdown", - "source_code": "def shutdown(self) -> None: nodes = [] for roots in self.roots.values(): nodes.extend(roots) while nodes: node = nodes.pop() for children in node.children.values(): nodes.extend(children) node.remove_node_cached_tensors() node.graph = None self.graph = None self.roots = None self.current_node = None", - "docstring": "Remove all cached tensors in all nodes. Because cached tensors can hold gradients which in turn might reference a backward which invokes a CUDA Graph Node, we have to manually clear them on shutdown to avoid a reference cycle.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", - "ast_data": "FunctionDef name:shutdown arguments arg:self Assign For Call call:values While Assign Call call:pop For Call call:values Assign Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "dont_skip_tracing", - "source_code": "def dont_skip_tracing(fn = None): ctx = patch_dynamo_config(dont_skip_tracing = True) if fn: return ctx(fn) return ctx", - "docstring": "Context manager/decorator to trace into functions intentionally marked by developers to be skipped when tracing. This decorator will also apply to recursively invoked functions.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\decorators.py", - "ast_data": "FunctionDef name:dont_skip_tracing arguments arg:fn Assign Call call:patch_dynamo_config If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "revert_all_patches", - "source_code": "def revert_all_patches(self): for patch in self.patches_made: patch.revert() return self.patches_made", - "docstring": "Remove all the stored patcheds. 
It doesn't modify patches_made.", - "type": "method", - "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", - "ast_data": "FunctionDef name:revert_all_patches arguments arg:self For Return return:yes" - }, - { - "library": "tensorflow", - "name": "KerasHistory", - "source_code": "class KerasHistory(collections.namedtuple('KerasHistory', ['layer', 'node_index', 'tensor_index'])): __slots__ = ()", - "docstring": "Tracks the Layer call that created a Tensor, for Keras Graph Networks. During construction of Keras Graph Networks, this metadata is added to each Tensor produced as the output of a Layer, starting with an . This allows Keras to track how each Tensor was produced, and this information is later retraced by the class to reconstruct the Keras Graph Network. Attributes: layer: The Layer that produced the Tensor. node_index: The specific call to the Layer that produced this Tensor. Layers can be called multiple times in order to share weights. A new node is created every time a Layer is called. tensor_index: The output index for this Tensor. Always zero if the Layer that produced this Tensor only has one output. Nested structures of Tensors are deterministically assigned an index via .", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py", - "ast_data": "ClassDef name:KerasHistory Call call:namedtuple Assign" - }, - { - "library": "matplotlib", - "name": "caching_module_getattr", - "source_code": "def caching_module_getattr(cls): assert cls.__name__ = = '__getattr__' props = {name: prop for name, prop in vars(cls).items() if isinstance(prop, property)} instance = cls() @functools.cache def __getattr__(name): if name in props: return props[name].__get__(instance) raise AttributeError(f'module {cls.__module__!r} has no attribute {name!r}') return __getattr__", - "docstring": "Helper decorator for implementing module-level `` for deprecating module globals). The properties are all implicitly cached. Moreover, a suitable AttributeError is generated and raised if no property with the given name exists.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\_api\\__init__.py", - "ast_data": "FunctionDef name:caching_module_getattr arguments arg:cls Assign Assign Call call:cls FunctionDef name:__getattr__ arguments arg:name If Compare op:In Return return:yes Raise raises:AttributeError(f'module {cls.__module__!r} has no attribute {name!r}') Return return:yes" - }, - { - "library": "mongo", - "name": "upserted_ids", - "source_code": "@property def upserted_ids(self) -> Optional[dict[int, Any]]: self._raise_if_unacknowledged('upserted_ids') if self.bulk_api_result: return {upsert['index']: upsert['_id'] for upsert in self.bulk_api_result['upserted']} return None", - "docstring": "A map of operation index to the _id of the upserted document.", - "type": "method", - "file_path": "mongo\\pymongo\\results.py", - "ast_data": "FunctionDef name:upserted_ids arguments arg:self If Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "pep440_split_post", - "source_code": "def pep440_split_post(ver): vc = str.split(ver, '.post') return (vc[0], int(vc[1] or 0) if len(vc) = = 2 else None)", - "docstring": "Split pep440 version string at the post-release segment. 
Returns the release segments before the post-release and the post-release version number (or -1 if no post-release segment is present).", - "type": "function", - "file_path": "pandas\\pandas\\_version.py", - "ast_data": "FunctionDef name:pep440_split_post arguments arg:ver Assign Call call:split Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_max", - "source_code": "def set_max(self, max): self.set_val((self.val[0], max))", - "docstring": "Set the lower value of the slider to *max*. Parameters ---------- max : float", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\widgets.py", - "ast_data": "FunctionDef name:set_max arguments arg:self arg:max" - }, - { - "library": "coconut", - "name": "get_generic_for_typevars", - "source_code": "def get_generic_for_typevars(self): internal_assert(self.target_info < (3, 12), 'get_generic_for_typevars should only be used on targets < 3.12') typevar_info = self.current_parsing_context('typevars') internal_assert(typevar_info is not None, 'get_generic_for_typevars called with no typevars') generics = [] for TypeVarFunc, name in typevar_info['new_typevars']: if TypeVarFunc in ('TypeVar', 'ParamSpec'): generics.append(name) elif TypeVarFunc = = 'TypeVarTuple': if self.target_info > = (3, 11): generics.append('*' + name) else: generics.append('_coconut.typing.Unpack[' + name + ']') else: raise CoconutInternalException('invalid TypeVarFunc', TypeVarFunc, '(', name, ')') return '_coconut.typing.Generic[' + ', '.join(generics) + ']'", - "docstring": "Get the Generic instances for the current typevars.", - "type": "method", - "file_path": "coconut\\coconut\\compiler\\compiler.py", - "ast_data": "FunctionDef name:get_generic_for_typevars arguments arg:self Assign Call call:current_parsing_context Assign For If Compare op:In If Compare op:Eq If Compare op:GtE Raise raises:CoconutInternalException('invalid TypeVarFunc', TypeVarFunc, '(', name, ')') Return return:yes" - }, - { - "library": "scipy", - "name": "append", - "source_code": "def append(a, vancestors): add = True for j, va in enumerate(vancestors): if issubclass(va, a): add = False break if issubclass(a, va): vancestors[j] = a add = False if add: vancestors.append(a)", - "docstring": "Append `` to the list of the virtual ancestors, unless it is already included.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\decorator.py", - "ast_data": "FunctionDef name:append arguments arg:a arg:vancestors Assign For Call call:enumerate If Call call:issubclass Assign If Call call:issubclass Assign Assign If" - }, - { - "library": "tensorflow", - "name": "copy_origin", - "source_code": "def copy_origin(from_node, to_node): origin = anno.Basic.ORIGIN.of(from_node, default = None) if origin is None: return if not isinstance(to_node, (list, tuple)): to_node = (to_node,) for node in to_node: for n in gast.walk(node): anno.setanno(n, anno.Basic.ORIGIN, origin)", - "docstring": "Copies the origin info from a node to another, recursively.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\origin_info.py", - "ast_data": "FunctionDef name:copy_origin arguments arg:from_node arg:to_node Assign Call call:of If Compare op:Is Return return:no If Assign For For Call call:walk" - }, - { - "library": "tensorflow", - "name": "get_input_at", - "source_code": "@doc_controls.do_not_doc_inheritable def get_input_at(self, node_index): return self._get_node_attribute_at_index(node_index, 'input_tensors', 'input')", - "docstring": "Retrieves the input 
tensor(s) of a layer at a given node. Args: node_index: Integer, index of the node from which to retrieve the attribute. E.g. will correspond to the first input node of the layer. Returns: A tensor (or list of tensors if the layer has multiple inputs). Raises: RuntimeError: If called in Eager mode.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:get_input_at arguments arg:self arg:node_index Return return:yes" - }, - { - "library": "scipy", - "name": "kstwobign_gen", - "source_code": "class kstwobign_gen(rv_continuous): def _shape_info(self): return [] def _pdf(self, x): return -scu._kolmogp(x) def _cdf(self, x): return scu._kolmogc(x) def _sf(self, x): return sc.kolmogorov(x) def _ppf(self, q): return scu._kolmogci(q) def _isf(self, q): return sc.kolmogi(q)", - "docstring": "Limiting distribution of scaled Kolmogorov-Smirnov two-sided test statistic. This is the asymptotic distribution of the two-sided Kolmogorov-Smirnov statistic :math: that measures the maximum absolute distance of the theoretical (continuous) CDF from the empirical CDF. (see ). %(before_notes)s See Also -------- ksone, kstwo, kstest Notes ----- :math: is given by .. math:: D_n = \\text{sup}_x |F_n(x) - F(x)| where :math: is a continuous CDF and :math: is an empirical CDF. describes the asymptotic distribution (i.e. the limit of :math:) under the null hypothesis of the KS test that the empirical CDF corresponds to i.i.d. random variates with CDF :math:. %(after_notes)s References ---------- .. [1] Feller, W. \"On the Kolmogorov-Smirnov Limit Theorems for Empirical Distributions\", Ann. Math. Statist. Vol 19, 177-189 (1948). %(example)s", - "type": "class", - "file_path": "scipy\\scipy\\stats\\_continuous_distns.py", - "ast_data": "ClassDef name:kstwobign_gen FunctionDef name:_shape_info arguments arg:self Return return:yes FunctionDef name:_pdf arguments arg:self arg:x Return return:yes FunctionDef name:_cdf arguments arg:self arg:x Return return:yes FunctionDef name:_sf arguments arg:self arg:x Return return:yes FunctionDef name:_ppf arguments arg:self arg:q Return return:yes FunctionDef name:_isf arguments arg:self arg:q Return return:yes" - }, - { - "library": "tensorflow", - "name": "convert_object_to_bytearray", - "source_code": "def convert_object_to_bytearray(model_object, extra_buffer = b''): builder = flatbuffers.Builder(1024) model_offset = model_object.Pack(builder) builder.Finish(model_offset, file_identifier = _TFLITE_FILE_IDENTIFIER) model_bytearray = bytes(builder.Output()) model_bytearray = model_bytearray + extra_buffer return model_bytearray", - "docstring": "Converts a tflite model from an object to a immutable bytearray.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\tools\\flatbuffer_utils.py", - "ast_data": "FunctionDef name:convert_object_to_bytearray arguments arg:model_object arg:extra_buffer Assign Call call:Builder Assign Call call:Pack Assign Call call:bytes Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "sharex", - "source_code": "def sharex(self, other): _api.check_isinstance(_AxesBase, other = other) if self._sharex is not None and other is not self._sharex: raise ValueError('x-axis is already shared') self._shared_axes['x'].join(self, other) self._sharex = other self.xaxis.major = other.xaxis.major self.xaxis.minor = other.xaxis.minor x0, x1 = other.get_xlim() self.set_xlim(x0, x1, emit = False, auto = other.get_autoscalex_on()) self.xaxis._scale = 
other.xaxis._scale", - "docstring": "Share the x-axis with *other*. This is equivalent to passing `` when constructing the Axes, and cannot be used if the x-axis is already being shared with another Axes. Note that it is not possible to unshare axes.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:sharex arguments arg:self arg:other If BoolOp Compare op:IsNot Compare op:IsNot Raise raises:ValueError('x-axis is already shared') Assign Assign Assign Assign Call call:get_xlim Assign" - }, - { - "library": "matplotlib", - "name": "set_stretch", - "source_code": "def set_stretch(self, stretch): stretch = mpl._val_or_rc(stretch, 'font.stretch') if stretch in stretch_dict: self._stretch = stretch return try: stretch = int(stretch) except ValueError: pass else: if 0 < = stretch < = 1000: self._stretch = stretch return raise ValueError(f'stretch = {stretch!r} is invalid')", - "docstring": "Set the font stretch or width. Parameters ---------- stretch : int or {'ultra-condensed', 'extra-condensed', 'condensed', 'semi-condensed', 'normal', 'semi-expanded', 'expanded', 'extra-expanded', 'ultra-expanded'}, default: :rc: If int, must be in the range 0-1000.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", - "ast_data": "FunctionDef name:set_stretch arguments arg:self arg:stretch Assign Call call:_val_or_rc If Compare op:In Assign Return return:no Try Assign Call call:int ExceptHandler If Compare op:LtE op:LtE Assign Return return:no Raise raises:ValueError(f'stretch={stretch!r} is invalid')" - }, - { - "library": "pandas", - "name": "T", - "source_code": "@property def T(self) -> DataFrame: return self.transpose()", - "docstring": "The transpose of the DataFrame. Returns ------- DataFrame The transposed DataFrame. See Also -------- DataFrame.transpose : Transpose index and columns. 
Examples -------- >>> df = pd.DataFrame({\"col1\": [1, 2], \"col2\": [3, 4]}) >>> df col1 col2 0 1 3 1 2 4 >>> df.T 0 1 col1 1 2 col2 3 4", - "type": "method", - "file_path": "pandas\\pandas\\core\\frame.py", - "ast_data": "FunctionDef name:T arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "is_valid_permutation", - "source_code": "def is_valid_permutation(rank: int, perm: DimsSequenceType) -> bool: return isinstance(perm, Sequence) and sorted(perm) = = list(range(rank))", - "docstring": "Validates that perm is a permutation of length rank.", - "type": "function", - "file_path": "pytorch\\torch\\_prims_common\\__init__.py", - "ast_data": "FunctionDef name:is_valid_permutation arguments arg:rank type:int arg:perm type:DimsSequenceType Return return:yes" - }, - { - "library": "tensorflow", - "name": "validate_config", - "source_code": "def validate_config(config): return isinstance(config, dict) and _LAYER_UNDEFINED_CONFIG_KEY not in config", - "docstring": "Determines whether config appears to be a valid layer config.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\generic_utils.py", - "ast_data": "FunctionDef name:validate_config arguments arg:config Return return:yes" - }, - { - "library": "pytorch", - "name": "is_structseq", - "source_code": "def is_structseq(obj: Union[object, type]) -> bool: cls = obj if isinstance(obj, type) else type(obj) return is_structseq_class(cls)", - "docstring": "Return whether the object is an instance of PyStructSequence or a class of PyStructSequence.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\_pytree.py", - "ast_data": "FunctionDef name:is_structseq arguments arg:obj type:Union[object, type] Assign Return return:yes" - }, - { - "library": "authlib", - "name": "token", - "source_code": "@token.setter def token(self, token): if token is None: self.auth.token = None self.auth.token_secret = None self.auth.verifier = None elif 'oauth_token' in token: self.auth.token = token['oauth_token'] if 'oauth_token_secret' in token: self.auth.token_secret = token['oauth_token_secret'] if 'oauth_verifier' in token: self.auth.verifier = token['oauth_verifier'] else: message = f'oauth_token is missing: {token!r}' self.handle_error('missing_token', message)", - "docstring": "This token setter is designed for an easy integration for OAuthClient. Make sure both OAuth1Session and OAuth2Session have token setters.", - "type": "method", - "file_path": "authlib\\authlib\\oauth1\\client.py", - "ast_data": "FunctionDef name:token arguments arg:self arg:token If Compare op:Is Assign Assign Assign If Compare op:In Assign If Compare op:In Assign If Compare op:In Assign Assign" - }, - { - "library": "pandas", - "name": "__init__", - "source_code": "@doc(storage_options = _shared_docs['storage_options']) def __init__(self, filepath_or_buffer: FilePath | ReadBuffer[bytes], storage_options: StorageOptions | None = None, engine_kwargs: dict | None = None) -> None: import_optional_dependency('pyxlsb') super().__init__(filepath_or_buffer, storage_options = storage_options, engine_kwargs = engine_kwargs)", - "docstring": "Reader using pyxlsb engine. Parameters ---------- filepath_or_buffer : str, path object, or Workbook Object to be parsed. 
{storage_options} engine_kwargs : dict, optional Arbitrary keyword arguments passed to excel engine.", "type": "method", "file_path": "pandas\\pandas\\io\\excel\\_pyxlsb.py", "ast_data": "FunctionDef name:__init__ arguments arg:self arg:filepath_or_buffer type:FilePath | ReadBuffer[bytes] arg:storage_options type:StorageOptions | None arg:engine_kwargs type:dict | None Call call:doc" }, { "library": "pytorch", "name": "CheckpointWrapper", "source_code": "class CheckpointWrapper(ActivationWrapper): def __init__(self, mod: torch.nn.Module, checkpoint_impl: CheckpointImpl = CheckpointImpl.NO_REENTRANT, checkpoint_fn = None, **checkpoint_fn_kwargs): super().__init__(mod) self.checkpoint_impl = checkpoint_impl if checkpoint_fn is None: self.checkpoint_fn = partial(torch_utils_checkpoint, use_reentrant = self.checkpoint_impl == CheckpointImpl.REENTRANT, **checkpoint_fn_kwargs) else: self.checkpoint_fn = partial(checkpoint_fn, **checkpoint_fn_kwargs) def forward(self, *args, **kwargs): if self.checkpoint_impl == CheckpointImpl.REENTRANT and kwargs != {}: flat_args, kwarg_keys = _pack_kwargs(*args, **kwargs) def my_function(*inputs): unpacked_args, unpacked_kwargs = _unpack_kwargs(inputs, kwarg_keys) return self._checkpoint_wrapped_module(*unpacked_args, **unpacked_kwargs) return self.checkpoint_fn(my_function, *flat_args) else: return self.checkpoint_fn(self._checkpoint_wrapped_module, *args, **kwargs)", "docstring": "An `` function.", "type": "class", "file_path": "pytorch\\torch\\distributed\\algorithms\\_checkpoint\\checkpoint_wrapper.py", "ast_data": "ClassDef name:CheckpointWrapper FunctionDef name:__init__ arguments arg:self arg:mod type:torch.nn.Module arg:checkpoint_impl type:CheckpointImpl arg:checkpoint_fn kwarg:checkpoint_fn_kwargs Assign If Compare op:Is Assign Call call:partial Assign Call call:partial FunctionDef name:forward arguments arg:self vararg:args kwarg:kwargs If BoolOp Compare op:Eq Compare op:NotEq Assign Call call:_pack_kwargs FunctionDef name:my_function arguments vararg:inputs Assign Call call:_unpack_kwargs Return return:yes Return return:yes Return return:yes" }, { "library": "scikit-learn", "name": "__call__", "source_code": "def __call__(self, df): if not hasattr(df, 'iloc'): raise ValueError('make_column_selector can only be applied to pandas dataframes') df_row = df.iloc[: 1] if self.dtype_include is not None or self.dtype_exclude is not None: df_row = df_row.select_dtypes(include = self.dtype_include, exclude = self.dtype_exclude) cols = df_row.columns if self.pattern is not None: cols = cols[cols.str.contains(self.pattern, regex = True)] return cols.tolist()", "docstring": "Callable for column selection to be used by a :class:. Parameters ---------- df : dataframe of shape (n_features, n_samples) DataFrame to select columns from.", "type": "method", "file_path": "scikit-learn\\sklearn\\compose\\_column_transformer.py", "ast_data": "FunctionDef name:__call__ arguments arg:self arg:df If Raise raises:ValueError('make_column_selector can only be applied to pandas dataframes') Assign If BoolOp Compare op:IsNot Compare op:IsNot Assign Call call:select_dtypes Assign If Compare op:IsNot Assign Return return:yes" }, { "library": "tensorflow", "name": "get_size", "source_code": "@abc.abstractmethod def get_size(self): raise NotImplementedError", "docstring": "Return the size (number of batches) for the dataset created.
For certain type of the data input, the number of batches is known, eg for Numpy data, the size is same as (number_of_element / batch_size). Whereas for dataset or python generator, the size is unknown since it may or may not have a end state. Returns: int, the number of batches for the dataset, or None if it is unknown. The caller could use this to control the loop of training, show progress bar, or handle unexpected StopIteration error.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\data_adapter.py", - "ast_data": "FunctionDef name:get_size arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "tensorflow", - "name": "prune_unconnected_ops_from_xla", - "source_code": "def prune_unconnected_ops_from_xla(prune_graph: ops.Graph): for graph in [prune_graph] + [f for f in prune_graph._functions.values()]: if not isinstance(graph, ops.Graph): continue for op in graph.get_operations(): if op.type not in _UNCONNECTED_OPS_TO_PRUNE: continue outputs_consumed = False for output in op.outputs: if output.consumers(): outputs_consumed = True break if not outputs_consumed: logging.info('Pruning OP %s of type %s from XLA Compile due to it being disconnected.', op.name, op.type) op._clear_attr(tpu_replication._TPU_REPLICATE_ATTR)", - "docstring": "Prunes unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. Args: prune_graph: A tensorflow graph from which we wish to prune unconnected ops as listed in _UNCONNECTED_OPS_TO_PRUNE. In general, these ops should have no inputs and no consumers. These can often be left behind due to graph construction rewiring (for instance TF-Hub). While they never execute, they will cause XLA compile to fail so we strip them from XLA compile by removing the tpu_replicate attribute.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu.py", - "ast_data": "FunctionDef name:prune_unconnected_ops_from_xla arguments arg:prune_graph type:ops.Graph For If For Call call:get_operations If Compare op:NotIn Assign For If Call call:consumers Assign If" - }, - { - "library": "mongo", - "name": "max_idle_time_seconds", - "source_code": "@property def max_idle_time_seconds(self) -> Optional[int]: return self.__max_idle_time_seconds", - "docstring": "The maximum number of seconds that a connection can remain idle in the pool before being removed and replaced. Defaults to (no limit).", - "type": "method", - "file_path": "mongo\\pymongo\\pool_options.py", - "ast_data": "FunctionDef name:max_idle_time_seconds arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "CollatorIterDataPipe", - "source_code": "@functional_datapipe('collate') class CollatorIterDataPipe(MapperIterDataPipe): def __init__(self, datapipe: IterDataPipe, conversion: Union[Callable[..., Any], dict[Union[str, Any], Union[Callable, Any]], None] = default_collate, collate_fn: Optional[Callable] = None) -> None: if collate_fn is not None: super().__init__(datapipe, fn = collate_fn) elif callable(conversion): super().__init__(datapipe, fn = conversion) else: collate_fn = functools.partial(_collate_helper, conversion) super().__init__(datapipe, fn = collate_fn)", - "docstring": "Collates samples from DataPipe to Tensor(s) by a custom collate function (functional name: `torch.utils.data.default_collatetorch.utils.data.default_collatefunctools.partial` to specify any additional arguments. 
Args: datapipe: Iterable DataPipe being collated collate_fn: Customized collate function to collect and combine data or a batch of data. Default function collates to Tensor(s) based on data type. Example: >>> # xdoctest: +SKIP >>> # Convert integer data to float Tensor >>> class MyIterDataPipe(torch.utils.data.IterDataPipe): ... def __init__(self, start, end): ... super(MyIterDataPipe).__init__() ... assert end > start, \"this example code only works with end >= start\" ... self.start = start ... self.end = end ... ... def __iter__(self): ... return iter(range(self.start, self.end)) ... ... def __len__(self): ... return self.end - self.start ... >>> ds = MyIterDataPipe(start=3, end=7) >>> print(list(ds)) [3, 4, 5, 6] >>> def collate_fn(batch): ... return torch.tensor(batch, dtype=torch.float) ... >>> collated_ds = CollateIterDataPipe(ds, collate_fn=collate_fn) >>> print(list(collated_ds)) [tensor(3.), tensor(4.), tensor(5.), tensor(6.)]", - "type": "class", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\iter\\callable.py", - "ast_data": "ClassDef name:CollatorIterDataPipe Call call:functional_datapipe FunctionDef name:__init__ arguments arg:self arg:datapipe type:IterDataPipe arg:conversion type:Union[Callable[..., Any], dict[Union[str, Any], Union[Callable, Any]], None] arg:collate_fn type:Optional[Callable] If Compare op:IsNot If Call call:callable Assign Call call:partial" - }, - { - "library": "authlib", - "name": "import_key", - "source_code": "@classmethod def import_key(cls, raw, options = None): kty = None if options is not None: kty = options.get('kty') if kty is None and isinstance(raw, dict): kty = raw.get('kty') if kty is None: raw_key = load_pem_key(raw) for _kty in cls.JWK_KEY_CLS: key_cls = cls.JWK_KEY_CLS[_kty] if key_cls.validate_raw_key(raw_key): return key_cls.import_key(raw_key, options) key_cls = cls.JWK_KEY_CLS[kty] return key_cls.import_key(raw, options)", - "docstring": "Import a Key from bytes, string, PEM or dict. :return: Key instance", - "type": "method", - "file_path": "authlib\\authlib\\jose\\rfc7517\\jwk.py", - "ast_data": "FunctionDef name:import_key arguments arg:cls arg:raw arg:options Assign If Compare op:IsNot Assign Call call:get If BoolOp Compare op:Is Call call:isinstance Assign Call call:get If Compare op:Is Assign Call call:load_pem_key For Assign If Call call:validate_raw_key Return return:yes Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_antialiased", - "source_code": "def set_antialiased(self, aa): self._antialiased = mpl._val_or_rc(aa, 'patch.antialiased') self.stale = True", - "docstring": "Set whether to use antialiased rendering. 
Parameters ---------- aa : bool or None", "type": "method", "file_path": "matplotlib\\lib\\matplotlib\\patches.py", "ast_data": "FunctionDef name:set_antialiased arguments arg:self arg:aa Assign Call call:_val_or_rc Assign" }, { "library": "pytorch", "name": "fetch_model", "source_code": "def fetch_model(model_path, device, sparse_dlrm = False): if zipfile.is_zipfile(model_path): with zipfile.ZipFile(model_path, 'r', zipfile.ZIP_DEFLATED) as zip_ref: zip_ref.extractall(os.path.dirname(model_path)) unzip_path = model_path.replace('.zip', '.ckpt') else: unzip_path = model_path model = get_dlrm_model(sparse_dlrm = sparse_dlrm) model.load_state_dict(torch.load(unzip_path, map_location = device)) model = model.to(device) model.eval() if zipfile.is_zipfile(model_path): os.remove(unzip_path) return model", "docstring": "This function unzips the zipped model checkpoint (if zipped) and returns a model object Args: model_path (str) path pointing to the zipped/raw model checkpoint file that was dumped in evaluate disk savings device (torch.device) device to which model needs to be loaded to", "type": "function", "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\data_sparsifier\\benchmarks\\dlrm_utils.py", "ast_data": "FunctionDef name:fetch_model arguments arg:model_path arg:device arg:sparse_dlrm If Call call:is_zipfile With Assign Call call:replace Assign Assign Call call:get_dlrm_model Assign Call call:to If Call call:is_zipfile Return return:yes" }, { "library": "coconut", "name": "new_testlist_star_expr_handle", "source_code": "def new_testlist_star_expr_handle(self, tokens): item, = tokens if (3, 5) <= self.target_info <= (3, 8): return '(' + item + ')' else: return item", "docstring": "Handles new starred expressions that only started being allowed outside of parentheses in Python 3.9.", "type": "method", "file_path": "coconut\\coconut\\compiler\\compiler.py", "ast_data": "FunctionDef name:new_testlist_star_expr_handle arguments arg:self arg:tokens Assign If Compare op:LtE op:LtE Return return:yes Return return:yes" }, { "library": "sphinx", "name": "is_classmethod_descriptor", "source_code": "def is_classmethod_descriptor(obj: Any, cls: Any = None, name: str | None = None) -> TypeIs[types.ClassMethodDescriptorType]: if isinstance(obj, types.ClassMethodDescriptorType): return True if cls and name: sentinel = object() for basecls in getmro(cls): meth = basecls.__dict__.get(name, sentinel) if meth is not sentinel: return isinstance(meth, types.ClassMethodDescriptorType) return False", "docstring": "Check if the object is a :class:. This check is stricter than :func: as a classmethod descriptor does not have a `` attribute.", "type": "function", "file_path": "sphinx\\sphinx\\util\\inspect.py", "ast_data": "FunctionDef name:is_classmethod_descriptor arguments arg:obj type:Any arg:cls type:Any arg:name type:str | None If Call call:isinstance Return return:yes If BoolOp Assign Call call:object For Call call:getmro Assign Call call:get If Compare op:IsNot Return return:yes Return return:yes" }, { "library": "scikit-learn", "name": "device", "source_code": "def device(*array_list, remove_none = True, remove_types = (str,)): array_list = _remove_non_arrays(*array_list, remove_none = remove_none, remove_types = remove_types) if not array_list: return None device_ = _single_array_device(array_list[0]) for array in array_list[1:]: device_other = _single_array_device(array) if device_ !=
device_other: raise ValueError(f'Input arrays use different devices: {device_}, {device_other}') return device_", "docstring": "Hardware device where the array data resides on. If the hardware device is not the same for all arrays, an error is raised. Parameters ---------- *array_list : arrays List of array instances from NumPy or an array API compatible library. remove_none : bool, default=True Whether to ignore None objects passed in array_list. remove_types : tuple or list, default=(str,) Types to ignore in array_list. Returns ------- out : device object (see the \"Device Support\" section of the array API spec).", "type": "function", "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", "ast_data": "FunctionDef name:device arguments vararg:array_list Assign Call call:_remove_non_arrays If Return return:yes Assign Call call:_single_array_device For Assign Call call:_single_array_device If Compare op:NotEq Raise raises:ValueError(f'Input arrays use different devices: {device_}, {device_other}') Return return:yes" }, { "library": "scrapy", "name": "format_part_strings", "source_code": "def format_part_strings(self, part_strings: list[str]) -> list[str]: if part_strings and part_strings[0].startswith('usage: '): part_strings[0] = 'Usage\\n=====\\n ' + part_strings[0][len('usage: '):] headings = [i for i in range(len(part_strings)) if part_strings[i].endswith(': \\n')] for index in headings[: : -1]: char = '-' if 'Global Options' in part_strings[index] else '=' part_strings[index] = part_strings[index][: -2].title() underline = ''.join(['\\n', char * len(part_strings[index]), '\\n']) part_strings.insert(index + 1, underline) return part_strings", "docstring": "Underline and title case command line help message headers.", "type": "method", "file_path": "scrapy\\scrapy\\commands\\__init__.py", "ast_data": "FunctionDef name:format_part_strings arguments arg:self arg:part_strings type:list[str] If BoolOp Call call:startswith Assign Assign For Assign Assign Call call:title Assign Call call:join Return return:yes" }, { "library": "django", "name": "__str__", "source_code": "def __str__(self): sql, params = self.sql_with_params() return sql % params", "docstring": "Return the query as a string of SQL with the parameter values substituted in (use sql_with_params() to see the unsubstituted string). Parameter values won't necessarily be quoted correctly, since that is done by the database interface at execution time.", "type": "method", "file_path": "django\\django\\db\\models\\sql\\query.py", "ast_data": "FunctionDef name:__str__ arguments arg:self Assign Call call:sql_with_params Return return:yes" }, { "library": "tensorflow", "name": "assert_has_rank", "source_code": "def assert_has_rank(self, rank): if self.rank not in (None, rank): raise ValueError('Shape %s must have rank %d' % (self, rank))", "docstring": "Raises an exception if is not compatible with the given . Args: rank: An integer.
Raises: ValueError: If does not represent a shape with the given .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", - "ast_data": "FunctionDef name:assert_has_rank arguments arg:self arg:rank If Compare op:NotIn Raise raises:ValueError('Shape %s must have rank %d' % (self, rank))" - }, - { - "library": "matplotlib", - "name": "file_requires_unicode", - "source_code": "def file_requires_unicode(x): try: x.write(b'') except TypeError: return True else: return False", - "docstring": "Return whether the given writable file-like object requires Unicode to be written to it.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\cbook.py", - "ast_data": "FunctionDef name:file_requires_unicode arguments arg:x Try ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "__new__", - "source_code": "def __new__(cls, *system, **kwargs): if len(system) = = 1 and isinstance(system[0], LinearTimeInvariant): return system[0].to_ss() if cls is StateSpace: if kwargs.get('dt') is None: return StateSpaceContinuous.__new__(StateSpaceContinuous, *system, **kwargs) else: return StateSpaceDiscrete.__new__(StateSpaceDiscrete, *system, **kwargs) return super().__new__(cls)", - "docstring": "Create new StateSpace object and settle inheritance.", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:__new__ arguments arg:cls vararg:system kwarg:kwargs If BoolOp Compare op:Eq Call call:isinstance Return return:yes If Compare op:Is If Compare op:Is Return return:yes Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "modules", - "source_code": "def modules(self) -> Iterator['Module']: for _, module in self.named_modules(): yield module", - "docstring": "Return an iterator over all modules in the network. Yields: Module: a module in the network Note: Duplicate modules are returned only once. In the following example, `` will be returned only once. Example:: >>> l = nn.Linear(2, 2) >>> net = nn.Sequential(l, l) >>> for idx, m in enumerate(net.modules()): ... print(idx, '->', m) 0 -> Sequential( (0): Linear(in_features=2, out_features=2, bias=True) (1): Linear(in_features=2, out_features=2, bias=True) ) 1 -> Linear(in_features=2, out_features=2, bias=True)", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:modules arguments arg:self For Call call:named_modules" - }, - { - "library": "scikit-learn", - "name": "get_n_splits", - "source_code": "def get_n_splits(self, X = None, y = None, groups = None): return self.n_splits", - "docstring": "Returns the number of splitting iterations in the cross-validator. Parameters ---------- X : object Always ignored, exists for compatibility. y : object Always ignored, exists for compatibility. groups : object Always ignored, exists for compatibility. 
Returns ------- n_splits : int Returns the number of splitting iterations in the cross-validator.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", - "ast_data": "FunctionDef name:get_n_splits arguments arg:self arg:X arg:y arg:groups Return return:yes" - }, - { - "library": "tensorflow", - "name": "stop", - "source_code": "def stop(self, timeout = None): self.stop_signal.set() with self.queue.mutex: self.queue.queue.clear() self.queue.unfinished_tasks = 0 self.queue.not_full.notify() self.run_thread.join(timeout) _SHARED_SEQUENCES[self.uid] = None", - "docstring": "Stops running threads and wait for them to exit, if necessary. Should be called by the same thread which called . Args: timeout: maximum time to wait on", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", - "ast_data": "FunctionDef name:stop arguments arg:self arg:timeout With Assign Assign" - }, - { - "library": "scipy", - "name": "collocation_fun", - "source_code": "def collocation_fun(fun, y, p, x, h): f = fun(x, y, p) y_middle = 0.5 * (y[:, 1:] + y[:, : -1]) - 0.125 * h * (f[:, 1:] - f[:, : -1]) f_middle = fun(x[: -1] + 0.5 * h, y_middle, p) col_res = y[:, 1:] - y[:, : -1] - h / 6 * (f[:, : -1] + f[:, 1:] + 4 * f_middle) return (col_res, y_middle, f, f_middle)", - "docstring": "Evaluate collocation residuals. This function lies in the core of the method. The solution is sought as a cubic C1 continuous spline with derivatives matching the ODE rhs at given nodes . Collocation conditions are formed from the equality of the spline derivatives and rhs of the ODE system in the middle points between nodes. Such method is classified to Lobbato IIIA family in ODE literature. Refer to [1]_ for the formula and some discussion. Returns ------- col_res : ndarray, shape (n, m - 1) Collocation residuals at the middle points of the mesh intervals. y_middle : ndarray, shape (n, m - 1) Values of the cubic spline evaluated at the middle points of the mesh intervals. f : ndarray, shape (n, m) RHS of the ODE system evaluated at the mesh nodes. f_middle : ndarray, shape (n, m - 1) RHS of the ODE system evaluated at the middle points of the mesh intervals (and using ). References ---------- .. [1] J. Kierzenka, L. F. Shampine, \"A BVP Solver Based on Residual Control and the Maltab PSE\", ACM Trans. Math. Softw., Vol. 27, Number 3, pp. 299-316, 2001.", - "type": "function", - "file_path": "scipy\\scipy\\integrate\\_bvp.py", - "ast_data": "FunctionDef name:collocation_fun arguments arg:fun arg:y arg:p arg:x arg:h Assign Call call:fun Assign Assign Call call:fun Assign Return return:yes" - }, - { - "library": "django", - "name": "postgis_version", - "source_code": "def postgis_version(self): return self._get_postgis_func('postgis_version')", - "docstring": "Return PostGIS version number and compile-time options.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\db\\backends\\postgis\\operations.py", - "ast_data": "FunctionDef name:postgis_version arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "mark_dirty", - "source_code": "def mark_dirty(self) -> None: self.dirty = True", - "docstring": "Mark this session as dirty. A server session is marked dirty when a command fails with a network error. 
Dirty sessions are later discarded from the server session pool.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\client_session.py", - "ast_data": "FunctionDef name:mark_dirty arguments arg:self Assign" - }, - { - "library": "django", - "name": "staff_member_required", - "source_code": "def staff_member_required(view_func = None, redirect_field_name = REDIRECT_FIELD_NAME, login_url = 'admin: login'): actual_decorator = user_passes_test(lambda u: u.is_active and u.is_staff, login_url = login_url, redirect_field_name = redirect_field_name) if view_func: return actual_decorator(view_func) return actual_decorator", - "docstring": "Decorator for views that checks that the user is logged in and is a staff member, redirecting to the login page if necessary.", - "type": "function", - "file_path": "django\\django\\contrib\\admin\\views\\decorators.py", - "ast_data": "FunctionDef name:staff_member_required arguments arg:view_func arg:redirect_field_name arg:login_url Assign Call call:user_passes_test If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_native_backend_config_dict", - "source_code": "def get_native_backend_config_dict(): return get_native_backend_config().to_dict()", - "docstring": "Return the for PyTorch Native backend (fbgemm/qnnpack) in dictionary form.", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\native.py", - "ast_data": "FunctionDef name:get_native_backend_config_dict arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "map_arg", - "source_code": "@compatibility(is_backward_compatible = True) def map_arg(a: ArgumentT, fn: Callable[[Node], Argument]) -> ArgumentT: assert callable(fn), 'torch.fx.map_arg(a, fn): fn must be a callable' return _fx_map_arg(a, fn)", - "docstring": "Apply fn recursively to each Node appearing in arg. arg may be a list, tuple, slice, or dict with string keys: the return value will have the same type and structure.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\node.py", - "ast_data": "FunctionDef name:map_arg arguments arg:a type:ArgumentT arg:fn type:Callable[[Node], Argument] Call call:compatibility Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, cluster): if isinstance(cluster, dict): self._cluster_spec = {} for job_name, tasks in cluster.items(): if isinstance(tasks, (list, tuple)): job_tasks = {i: task for i, task in enumerate(tasks)} elif isinstance(tasks, dict): job_tasks = {int(i): task for i, task in tasks.items()} else: raise TypeError('The tasks for job %r must be a list or a dictionary from integers to strings.' % job_name) self._cluster_spec[job_name] = job_tasks self._make_cluster_def() elif isinstance(cluster, cluster_pb2.ClusterDef): self._cluster_def = cluster self._cluster_spec = {} for job_def in self._cluster_def.job: self._cluster_spec[job_def.name] = {i: t for i, t in job_def.tasks.items()} elif isinstance(cluster, ClusterSpec): self._cluster_def = cluster_pb2.ClusterDef() self._cluster_def.MergeFrom(cluster.as_cluster_def()) self._cluster_spec = {} for job_def in self._cluster_def.job: self._cluster_spec[job_def.name] = {i: t for i, t in job_def.tasks.items()} else: raise TypeError('`cluster` must be a dictionary mapping one or more job names to lists of network addresses, or a `ClusterDef` protocol buffer')", - "docstring": "Creates a . 
Args: cluster: A dictionary mapping one or more job names to (i) a list of network addresses, or (ii) a dictionary mapping integer task indices to network addresses; or a protocol buffer. Raises: TypeError: If is not a dictionary mapping strings to lists of strings, and not a protobuf.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\server_lib.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:cluster If Call call:isinstance Assign For Call call:items If Call call:isinstance Assign If Call call:isinstance Assign Raise raises:TypeError('The tasks for job %r must be a list or a dictionary from integers to strings.' % job_name) Assign If Call call:isinstance Assign Assign For Assign If Call call:isinstance Assign Call call:ClusterDef Assign For Assign Raise raises:TypeError('`cluster` must be a dictionary mapping one or more job names to lists of network addresses, or a `ClusterDef` protocol buffer')" - }, - { - "library": "matplotlib", - "name": "get_window_title", - "source_code": "def get_window_title(self): return self._window_title", - "docstring": "Return the title text of the window containing the figure.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:get_window_title arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, left = None, bottom = None, right = None, top = None, wspace = None, hspace = None): for key in ['left', 'bottom', 'right', 'top', 'wspace', 'hspace']: setattr(self, key, mpl.rcParams[f'figure.subplot.{key}']) self.update(left, bottom, right, top, wspace, hspace)", - "docstring": "Defaults are given by :rc:. Parameters ---------- left : float, optional The position of the left edge of the subplots, as a fraction of the figure width. right : float, optional The position of the right edge of the subplots, as a fraction of the figure width. bottom : float, optional The position of the bottom edge of the subplots, as a fraction of the figure height. top : float, optional The position of the top edge of the subplots, as a fraction of the figure height. wspace : float, optional The width of the padding between subplots, as a fraction of the average Axes width. hspace : float, optional The height of the padding between subplots, as a fraction of the average Axes height.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\gridspec.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:left arg:bottom arg:right arg:top arg:wspace arg:hspace For" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, grpc_debug_server_addresses, thread_name_filter = None, send_traceback_and_source_code = True): def _gated_grpc_watch_fn(fetches, feeds): del fetches, feeds return framework.WatchOptions(debug_ops = ['DebugIdentity(gated_grpc = true)']) super(TensorBoardDebugHook, self).__init__(grpc_debug_server_addresses, watch_fn = _gated_grpc_watch_fn, thread_name_filter = thread_name_filter) self._grpc_debug_server_addresses = grpc_debug_server_addresses self._send_traceback_and_source_code = send_traceback_and_source_code self._sent_graph_version = -1 grpc_wrapper.register_signal_handler()", - "docstring": "Constructor of TensorBoardDebugHook. Args: grpc_debug_server_addresses: gRPC address(es) of debug server(s), as a or a of s. E.g., \"localhost:2333\", \"grpc://localhost:2333\", [\"192.168.0.7:2333\", \"192.168.0.8:2333\"]. 
thread_name_filter: Optional filter for thread names. send_traceback_and_source_code: Whether traceback of graph elements and the source code are to be sent to the debug server(s).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\wrappers\\hooks.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:grpc_debug_server_addresses arg:thread_name_filter arg:send_traceback_and_source_code FunctionDef name:_gated_grpc_watch_fn arguments arg:fetches arg:feeds Return return:yes Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "map_placements_after_broadcast", - "source_code": "def map_placements_after_broadcast(placements: tuple[Placement, ...], shape: torch.Size, broadcast_dims_map: list[int]) -> tuple[Placement, ...]: new_placements: list[Placement] = [] for placement in placements: if isinstance(placement, (Replicate, Partial)): new_placements.append(placement) else: assert isinstance(placement, Shard) shard_dim = normalize_dim(placement.dim, len(shape)) new_shard_dim = broadcast_dims_map[shard_dim] if new_shard_dim ! = -1: new_placements.append(Shard(new_shard_dim)) else: new_placements.append(Replicate()) return tuple(new_placements)", - "docstring": "Map each placement based on the output shape after broadcast.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\tensor\\_ops\\utils.py", - "ast_data": "FunctionDef name:map_placements_after_broadcast arguments arg:placements type:tuple[Placement, ...] arg:shape type:torch.Size arg:broadcast_dims_map type:list[int] For If Call call:isinstance Assign Call call:normalize_dim Assign If Compare op:NotEq Return return:yes" - }, - { - "library": "pandas", - "name": "UnsupportedFunctionCall", - "source_code": "class UnsupportedFunctionCall(ValueError): pass", - "docstring": "Exception raised when attempting to call a unsupported numpy function. For example, ``. See Also -------- DataFrame.groupby : Group DataFrame using a mapper or by a Series of columns. Series.groupby : Group Series using a mapper or by a Series of columns. core.groupby.GroupBy.cumsum : Compute cumulative sum for each group. Examples -------- >>> df = pd.DataFrame( ... {\"A\": [0, 0, 1, 1], \"B\": [\"x\", \"x\", \"z\", \"y\"], \"C\": [1, 2, 3, 4]} ... ) >>> np.cumsum(df.groupby([\"A\"])) Traceback (most recent call last): UnsupportedFunctionCall: numpy operations are not valid with groupby. Use .groupby(...).cumsum() instead", - "type": "class", - "file_path": "pandas\\pandas\\errors\\__init__.py", - "ast_data": "ClassDef name:UnsupportedFunctionCall" - }, - { - "library": "scikit-learn", - "name": "fit_predict", - "source_code": "def fit_predict(self, X, y = None): self.fit(X) return self.labels_", - "docstring": "Cluster X and return the associated cluster labels. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features), or ndarray of shape (n_samples, n_samples) A feature array, or array of distances between samples if . y : None Ignored. 
Returns ------- y : ndarray of shape (n_samples,) Cluster labels.", "type": "method", "file_path": "scikit-learn\\sklearn\\cluster\\_hdbscan\\hdbscan.py", "ast_data": "FunctionDef name:fit_predict arguments arg:self arg:X arg:y Return return:yes" }, { "library": "tensorflow", "name": "wait_for_new_checkpoint", "source_code": "def wait_for_new_checkpoint(checkpoint_dir, last_checkpoint = None, seconds_to_sleep = 1, timeout = None): logging.info('Waiting for new checkpoint at %s', checkpoint_dir) stop_time = time.time() + timeout if timeout is not None else None while True: checkpoint_path = checkpoint_management.latest_checkpoint(checkpoint_dir) if checkpoint_path is None or checkpoint_path == last_checkpoint: if stop_time is not None and time.time() + seconds_to_sleep > stop_time: return None time.sleep(seconds_to_sleep) else: logging.info('Found new checkpoint at %s', checkpoint_path) return checkpoint_path", "docstring": "Waits until a new checkpoint file is found. Args: checkpoint_dir: The directory in which checkpoints are saved. last_checkpoint: The last checkpoint path used or if we're expecting a checkpoint for the first time. seconds_to_sleep: The number of seconds to sleep for before looking for a new checkpoint. timeout: The maximum number of seconds to wait. If left as , then the process will wait indefinitely. Returns: a new checkpoint path, or None if the timeout was reached.", "type": "function", "file_path": "tensorflow\\tensorflow\\python\\training\\checkpoint_utils.py", "ast_data": "FunctionDef name:wait_for_new_checkpoint arguments arg:checkpoint_dir arg:last_checkpoint arg:seconds_to_sleep arg:timeout Assign While Assign Call call:latest_checkpoint If BoolOp Compare op:Is Compare op:Eq If BoolOp Compare op:IsNot Compare op:Gt Return return:yes Return return:yes" }, { "library": "pytorch", "name": "check_axis_name", "source_code": "@staticmethod def check_axis_name(name: str) -> bool: is_valid, _ = ParsedExpression.check_axis_name_return_reason(name) return is_valid", "docstring": "Check if the name is a valid axis name.
Args: name (str): the axis name to check Returns: bool: whether the axis name is valid", - "type": "method", - "file_path": "pytorch\\functorch\\einops\\_parsing.py", - "ast_data": "FunctionDef name:check_axis_name arguments arg:name type:str Assign Call call:check_axis_name_return_reason Return return:yes" - }, - { - "library": "tensorflow", - "name": "reset_captures", - "source_code": "def reset_captures(self, tensors, placeholders): self._by_val_external = MutationAwareDict() self._by_val_internal = MutationAwareDict() self._by_val_tracetype = MutationAwareDict() for external, internal in zip(tensors, placeholders): key = id(external) self._by_val_external[key] = external self._by_val_internal[key] = internal self._by_val_tracetype[key] = trace_type.from_value(external)", - "docstring": "Set the captures with the provided list of captures & placeholder.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\core\\function\\capture\\capture_container.py", - "ast_data": "FunctionDef name:reset_captures arguments arg:self arg:tensors arg:placeholders Assign Call call:MutationAwareDict Assign Call call:MutationAwareDict Assign Call call:MutationAwareDict For Call call:zip Assign Call call:id Assign Assign Assign Call call:from_value" - }, - { - "library": "kornia", - "name": "BlobHessian", - "source_code": "class BlobHessian(Module): def __init__(self, grads_mode: str = 'sobel') -> None: super().__init__() self.grads_mode: str = grads_mode def __repr__(self) -> str: return f'{self.__class__.__name__}(grads_mode = {self.grads_mode})' def forward(self, input: Tensor, sigmas: Optional[Tensor] = None) -> Tensor: return hessian_response(input, self.grads_mode, sigmas)", - "docstring": "Module that calculates Hessian blobs. .. image:: _static/img/hessian_response.png See :func: for details.", - "type": "class", - "file_path": "kornia\\kornia\\feature\\responses.py", - "ast_data": "ClassDef name:BlobHessian FunctionDef name:__init__ arguments arg:self arg:grads_mode type:str FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:input type:Tensor arg:sigmas type:Optional[Tensor] Return return:yes" - }, - { - "library": "tensorflow", - "name": "init_feed_dict", - "source_code": "@property def init_feed_dict(self): return self._init_feed_dict", - "docstring": "Return the feed dictionary used when evaluating the . 
Returns: A feed dictionary or .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\supervisor.py", - "ast_data": "FunctionDef name:init_feed_dict arguments arg:self Return return:yes" - }, - { - "library": "mongo", - "name": "eq_props", - "source_code": "def eq_props(self) -> tuple[tuple[_Address, ...], Optional[str], Optional[str], str]: ts = self._settings return (tuple(sorted(ts.seeds)), ts.replica_set_name, ts.fqdn, ts.srv_service_name)", - "docstring": "The properties to use for MongoClient/Topology equality checks.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\topology.py", - "ast_data": "FunctionDef name:eq_props arguments arg:self Assign Return return:yes" - }, - { - "library": "mongo", - "name": "open_upload_stream_with_id", - "source_code": "def open_upload_stream_with_id(self, file_id: Any, filename: str, chunk_size_bytes: Optional[int] = None, metadata: Optional[Mapping[str, Any]] = None, session: Optional[ClientSession] = None) -> GridIn: validate_string('filename', filename) opts = {'_id': file_id, 'filename': filename, 'chunk_size': chunk_size_bytes if chunk_size_bytes is not None else self._chunk_size_bytes} if metadata is not None: opts['metadata'] = metadata return GridIn(self._collection, session = session, **opts)", - "docstring": "Opens a Stream that the application can write the contents of the file to. The user must specify the file id and filename, and can choose to add any additional information in the metadata field of the file document or modify the chunk size. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) with fs.open_upload_stream_with_id( ObjectId(), \"test_file\", chunk_size_bytes=4, metadata={\"contentType\": \"text/plain\"}) as grid_in: grid_in.write(\"data I want to store!\") # uploaded on close Returns an instance of :class:. Raises :exc: if no such version of that file exists. Raises :exc: if is not a string. :param file_id: The id to use for this file. The id must not have already been used for another file. :param filename: The name of the file to upload. :param chunk_size_bytesGridFSBucket~pymongo.client_session.ClientSession` parameter.", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:open_upload_stream_with_id arguments arg:self arg:file_id type:Any arg:filename type:str arg:chunk_size_bytes type:Optional[int] arg:metadata type:Optional[Mapping[str, Any]] arg:session type:Optional[ClientSession] Assign If Compare op:IsNot Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_next_as_optional", - "source_code": "@deprecation.deprecated(None, 'Use `tf.data.Iterator.get_next_as_optional()` instead.') @tf_export('data.experimental.get_next_as_optional') def get_next_as_optional(iterator): return iterator.get_next_as_optional()", - "docstring": "Returns a with the next element of the iterator. If the iterator has reached the end of the sequence, the returned will have no value. Args: iterator: A . 
Returns: A object which either contains the next element of the iterator (if it exists) or no value.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\data\\ops\\iterator_ops.py", - "ast_data": "FunctionDef name:get_next_as_optional arguments arg:iterator Call call:deprecated Call call:tf_export Return return:yes" - }, - { - "library": "kornia", - "name": "create_meshgrid3d", - "source_code": "def create_meshgrid3d(depth: int, height: int, width: int, normalized_coordinates: bool = True, device: Optional[torch.device] = None, dtype: Optional[torch.dtype] = None) -> Tensor: xs: Tensor = torch.linspace(0, width - 1, width, device = device, dtype = dtype) ys: Tensor = torch.linspace(0, height - 1, height, device = device, dtype = dtype) zs: Tensor = torch.linspace(0, depth - 1, depth, device = device, dtype = dtype) if normalized_coordinates: xs = (xs / (width - 1) - 0.5) * 2 ys = (ys / (height - 1) - 0.5) * 2 zs = (zs / (depth - 1) - 0.5) * 2 base_grid = stack(torch_meshgrid([zs, xs, ys], indexing = 'ij'), dim = -1) return base_grid.permute(0, 2, 1, 3).unsqueeze(0)", - "docstring": "Generate a coordinate grid for an image. When the flag `[-1,1]torch.nn.functional.grid_sample[-1,1]torch.nn.functional.grid_sample(1, D, H, W, 3)`.", - "type": "function", - "file_path": "kornia\\kornia\\utils\\grid.py", - "ast_data": "FunctionDef name:create_meshgrid3d arguments arg:depth type:int arg:height type:int arg:width type:int arg:normalized_coordinates type:bool arg:device type:Optional[torch.device] arg:dtype type:Optional[torch.dtype] If Assign Assign Assign Assign Call call:stack Return return:yes" - }, - { - "library": "pandas", - "name": "tolist", - "source_code": "def tolist(self) -> list: if self.ndim > 1: return [x.tolist() for x in self] return list(self)", - "docstring": "Return a list of the values. These are each a scalar type, which is a Python scalar (for str, int, float) or a pandas scalar (for Timestamp/Timedelta/Interval/Period) Returns ------- list Python list of values in array. See Also -------- Index.to_list: Return a list of the values in the Index. Series.to_list: Return a list of the values in the Series. Examples -------- >>> arr = pd.array([1, 2, 3]) >>> arr.tolist() [1, 2, 3]", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\base.py", - "ast_data": "FunctionDef name:tolist arguments arg:self If Compare op:Gt Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "__rtruediv__", - "source_code": "def __rtruediv__(self, other): raise TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', please use // instead\".format(type(other).__name__))", - "docstring": "Use via instead. This function exists only to have a better error message. Instead of: , this function will explicitly call for usage of instead. Args: other: Another . Raises: TypeError.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\tensor_shape.py", - "ast_data": "FunctionDef name:__rtruediv__ arguments arg:self arg:other Raise raises:TypeError(\"unsupported operand type(s) for /: '{}' and 'Dimension', please use // instead\".format(type(other).__name__))" - }, - { - "library": "pygame", - "name": "pixels3d", - "source_code": "def pixels3d(surface): return numpy_array(surface.get_view('3'), copy = False)", - "docstring": "pygame.surfarray.pixels3d(Surface): return array reference pixels into a 3d array Create a new 3D array that directly references the pixel values in a Surface. 
Any changes to the array will affect the pixels in the Surface. This is a fast operation since no data is copied. This will only work on Surfaces that have 24-bit or 32-bit formats. Lower pixel formats cannot be referenced. The Surface this references will remain locked for the lifetime of the array (see the Surface.lock - lock the Surface memory for pixel access method).", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:pixels3d arguments arg:surface Return return:yes" - }, - { - "library": "numpy", - "name": "polypow", - "source_code": "def polypow(c, pow, maxpower = None): return pu._pow(np.convolve, c, pow, maxpower)", - "docstring": "Raise a polynomial to a power. Returns the polynomial raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of array of series coefficients ordered from low to high degree. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Power series of power. See Also -------- polyadd, polysub, polymulx, polymul, polydiv Examples -------- >>> from numpy.polynomial import polynomial as P >>> P.polypow([1, 2, 3], 2) array([ 1., 4., 10., 12., 9.])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\polynomial.py", - "ast_data": "FunctionDef name:polypow arguments arg:c arg:pow arg:maxpower Return return:yes" - }, - { - "library": "matplotlib", - "name": "writeInfoDict", - "source_code": "def writeInfoDict(self): self.infoObject = self.reserveObject('info') self.writeObject(self.infoObject, self.infoDict)", - "docstring": "Write out the info dictionary, checking it for good form", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", - "ast_data": "FunctionDef name:writeInfoDict arguments arg:self Assign Call call:reserveObject" - }, - { - "library": "pytorch", - "name": "from_dict", - "source_code": "@classmethod def from_dict(cls, fuse_custom_config_dict: dict[str, Any]) -> FuseCustomConfig: conf = cls() conf.set_preserved_attributes(fuse_custom_config_dict.get(PRESERVED_ATTRIBUTES_DICT_KEY, [])) return conf", - "docstring": "Create a `` This function is primarily for backward compatibility and may be removed in the future.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\fx\\custom_config.py", - "ast_data": "FunctionDef name:from_dict arguments arg:cls arg:fuse_custom_config_dict type:dict[str, Any] Assign Call call:cls Return return:yes" - }, - { - "library": "scikit-learn", - "name": "get_feature_names_out", - "source_code": "def get_feature_names_out(self, input_features = None): self._check_vocabulary() return np.asarray([t for t, i in sorted(self.vocabulary_.items(), key = itemgetter(1))], dtype = object)", - "docstring": "Get output feature names for transformation. Parameters ---------- input_features : array-like of str or None, default=None Not used, present here for API consistency by convention. 
Returns ------- feature_names_out : ndarray of str objects Transformed feature names.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", - "ast_data": "FunctionDef name:get_feature_names_out arguments arg:self arg:input_features Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_subtype_of", - "source_code": "def is_subtype_of(self, other: 'Parameter') -> bool: if not self.type_constraint or not other.type_constraint: raise TypeError('Can not determine relationship between partially specified types.') if (self.name, self.kind, self.optional) ! = (other.name, other.kind, other.optional): return False return self.type_constraint.is_subtype_of(other.type_constraint)", - "docstring": "Returns True if self is a supertype of other Parameter.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\core\\function\\polymorphism\\function_type.py", - "ast_data": "FunctionDef name:is_subtype_of arguments arg:self arg:other type:'Parameter' If BoolOp Raise raises:TypeError('Can not determine relationship between partially specified types.') If Compare op:NotEq Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "prepared", - "source_code": "@property def prepared(self): return PreparedGeometry(self)", - "docstring": "Return a PreparedGeometry corresponding to this geometry -- it is optimized for the contains, intersects, and covers operations.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\geometry.py", - "ast_data": "FunctionDef name:prepared arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, name, default_name = None, values = None) -> None: if not (default_name is None or isinstance(default_name, str)): raise TypeError('`default_name` type (%s) is not a string type. You likely meant to pass this into the `values` kwarg.' % type(default_name)) self._name = default_name if name is None else name self._default_name = default_name self._values = values", - "docstring": "Initialize the context manager. Args: name: The name argument that is passed to the op function. default_name: The default name to use if the argument is . values: The list of arguments that are passed to the op function. Raises: TypeError: if is passed in but not a string.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:name arg:default_name arg:values If Raise raises:TypeError('`default_name` type (%s) is not a string type. You likely meant to pass this into the `values` kwarg.' % type(default_name)) Assign Assign Assign" - }, - { - "library": "django", - "name": "clean_username", - "source_code": "def clean_username(self, username): return username", - "docstring": "Perform any cleaning on the \"username\" prior to using it to get or create the user object. Return the cleaned username. 
By default, return the username unchanged.", "type": "method", "file_path": "django\\django\\contrib\\auth\\backends.py", "ast_data": "FunctionDef name:clean_username arguments arg:self arg:username Return return:yes" }, { "library": "algorithms", "name": "cycle_product", "source_code": "def cycle_product(m1: Monomial, m2: Monomial) -> Monomial: assert isinstance(m1, Monomial) and isinstance(m2, Monomial) A = m1.variables B = m2.variables result_variables = dict() for i in A: for j in B: k = lcm(i, j) g = i * j // k if k in result_variables: result_variables[k] += A[i] * B[j] * g else: result_variables[k] = A[i] * B[j] * g return Monomial(result_variables, Fraction(m1.coeff * m2.coeff, 1))", "docstring": "Given two monomials (from the cycle index of a symmetry group), compute the resultant monomial in the cartesian product corresponding to their merging.", "type": "function", "file_path": "algorithms\\algorithms\\maths\\symmetry_group_cycle_index.py", "ast_data": "FunctionDef name:cycle_product arguments arg:m1 type:Monomial arg:m2 type:Monomial Assign Assign Assign Call call:dict For For Assign Call call:lcm Assign If Compare op:In Assign Return return:yes" }, { "library": "scipy", "name": "reset_state", "source_code": "@contextlib.contextmanager def reset_state(): with set_state(get_state()): yield", "docstring": "Returns a context manager that resets all state once exited. See Also -------- set_state Context manager that sets the backend state. get_state Gets a state to be set by this context manager.", "type": "function", "file_path": "scipy\\scipy\\_lib\\_uarray\\_backend.py", "ast_data": "FunctionDef name:reset_state arguments With" }, { "library": "scikit-learn", "name": "get_config", "source_code": "def get_config(): return _get_threadlocal_config().copy()", "docstring": "Retrieve current values for configuration set by :func:. Returns ------- config : dict Keys are parameter names that can be passed to :func:. See Also -------- config_context : Context manager for global scikit-learn configuration. set_config : Set global scikit-learn configuration.
Examples -------- >>> import sklearn >>> config = sklearn.get_config() >>> config.keys() dict_keys([...])", - "type": "function", - "file_path": "scikit-learn\\sklearn\\_config.py", - "ast_data": "FunctionDef name:get_config arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "forward_one_chunk", - "source_code": "def forward_one_chunk(self, fwd_chunk_id: int, args: tuple[Any, ...], kwargs: Optional[dict[str, Any]] = None): if self.is_first: composite_args = args else: composite_args = self._retrieve_recv_activations(fwd_chunk_id) composite_kwargs = kwargs or {} self._validate_fwd_input(args, kwargs) try: output = self.forward_maybe_with_nosync(*composite_args, **composite_kwargs) except Exception as e: exc_msg = f'\\n {self.log_prefix} failed to run forward: \\n args: {map_debug_info(composite_args)}\\n kwargs: {map_debug_info(composite_kwargs)}\\n ' raise RuntimeError(exc_msg) from e output_tuple = _normalize_model_output_as_tuple(output) if self.is_last: self.output_chunks.append(output) flat_args = flatten_args(composite_args) flat_kwargs = flatten_args(composite_kwargs) flatten_input_tensors = flat_args + flat_kwargs self.fwd_cache[fwd_chunk_id] = (output_tuple, flatten_input_tensors) logger.debug('%s Forwarded chunk %s, outputs: %s', self.log_prefix, fwd_chunk_id, map_debug_info(output)) self._validate_fwd_outputs(output_tuple) return output", - "docstring": "Perform forward pass on the stage with one microbatch. and are the inputs from *external* to this stage. As of Sept 2024: - applies to the first stage only, other stages receives args through activation transmission. - can be passed to all stages via respective calls.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\pipelining\\stage.py", - "ast_data": "FunctionDef name:forward_one_chunk arguments arg:self arg:fwd_chunk_id type:int arg:args type:tuple[Any, ...] arg:kwargs type:Optional[dict[str, Any]] If Assign Assign Call call:_retrieve_recv_activations Assign BoolOp Try Assign Call call:forward_maybe_with_nosync ExceptHandler Assign Raise raises:RuntimeError(exc_msg) Assign Call call:_normalize_model_output_as_tuple If Assign Call call:flatten_args Assign Call call:flatten_args Assign Assign Return return:yes" - }, - { - "library": "mongo", - "name": "select_servers", - "source_code": "def select_servers(self, selector: Callable[[Selection], Selection], operation: str, server_selection_timeout: Optional[float] = None, address: Optional[_Address] = None, operation_id: Optional[int] = None) -> list[Server]: if server_selection_timeout is None: server_timeout = self.get_server_selection_timeout() else: server_timeout = server_selection_timeout if not _IS_SYNC and self._monitor_tasks: self.cleanup_monitors() with self._lock: server_descriptions = self._select_servers_loop(selector, server_timeout, operation, operation_id, address) return [cast(Server, self.get_server_by_address(sd.address)) for sd in server_descriptions]", - "docstring": "Return a list of Servers matching selector, or time out. :param selector: function that takes a list of Servers and returns a subset of them. :param operation: The name of the operation that the server is being selected for. :param server_selection_timeout: maximum seconds to wait. If not provided, the default value common.SERVER_SELECTION_TIMEOUT is used. :param address: optional server address to select. Calls self.open() if needed. 
Raises exc: after if no matching servers are found.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\topology.py", - "ast_data": "FunctionDef name:select_servers arguments arg:self arg:selector type:Callable[[Selection], Selection] arg:operation type:str arg:server_selection_timeout type:Optional[float] arg:address type:Optional[_Address] arg:operation_id type:Optional[int] If Compare op:Is Assign Call call:get_server_selection_timeout Assign If BoolOp With Assign Call call:_select_servers_loop Return return:yes" - }, - { - "library": "pytorch", - "name": "countable_fx", - "source_code": "def countable_fx(node: torch.fx.Node) -> bool: assert isinstance(node, torch.fx.Node) if not hasattr(node, 'target'): return False target = node.target if not hasattr(target, 'overloadpacket'): return target in flop_registry packet = target.overloadpacket return packet in flop_registry", - "docstring": "Whether or not we can count the flops of an FX node.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\fx_utils.py", - "ast_data": "FunctionDef name:countable_fx arguments arg:node type:torch.fx.Node If Return return:yes Assign If Return return:yes Assign Return return:yes" - }, - { - "library": "pandas", - "name": "maybe_coerce_values", - "source_code": "def maybe_coerce_values(values: ArrayLike) -> ArrayLike: if isinstance(values, np.ndarray): values = ensure_wrapped_if_datetimelike(values) if issubclass(values.dtype.type, str): values = np.array(values, dtype = object) if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: values = values._with_freq(None) return values", - "docstring": "Input validation for values passed to __init__. Ensure that any datetime64/timedelta64 dtypes are in nanoseconds. Ensure that we do not have string dtypes. Parameters ---------- values : np.ndarray or ExtensionArray Returns ------- values : np.ndarray or ExtensionArray", - "type": "function", - "file_path": "pandas\\pandas\\core\\internals\\blocks.py", - "ast_data": "FunctionDef name:maybe_coerce_values arguments arg:values type:ArrayLike If Call call:isinstance Assign Call call:ensure_wrapped_if_datetimelike If Call call:issubclass Assign Call call:array If BoolOp Call call:isinstance Compare op:IsNot Assign Call call:_with_freq Return return:yes" - }, - { - "library": "scipy", - "name": "NewFunction02", - "source_code": "class NewFunction02(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.global_optimum = [[-9.94114736324, -9.99997128772]] self.fglob = -0.199409030092 def fun(self, x, *args): self.nfev + = 1 return abs(sin(sqrt(abs(x[0] ** 2 + x[1])))) ** 0.5 + 0.01 * (x[0] + x[1])", - "docstring": "NewFunction02 objective function. This class defines the NewFunction02 global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{NewFunction02}}(x) = \\left | {\\sin\\left(\\sqrt{\\lvert{x_{1}^{2} + x_{2}}\\rvert}\\right)} \\right |^{0.5} + (x_{1} + x_{2})/100 with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Global Optimization by Differential Evolution and Particle Swarm Methods: Evaluation on Some Benchmark Functions. 
Munich Personal RePEc Archive, 2006, 1005 TODO Line 368 TODO WARNING, minimum value is estimated from running many optimisations and choosing the best.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_N.py", - "ast_data": "ClassDef name:NewFunction02 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "pandas", - "name": "generate", - "source_code": "def generate(self, v) -> str: val = v.tostring(self.encoding) return f'({self.lhs} {self.op} {val})'", - "docstring": "create and return the op string for this TermValue", - "type": "method", - "file_path": "pandas\\pandas\\core\\computation\\pytables.py", - "ast_data": "FunctionDef name:generate arguments arg:self arg:v Assign Call call:tostring Return return:yes" - }, - { - "library": "scipy", - "name": "fractional_matrix_power", - "source_code": "@_apply_over_batch(('A', 2)) def fractional_matrix_power(A, t): A = _asarray_square(A) import scipy.linalg._matfuncs_inv_ssq return scipy.linalg._matfuncs_inv_ssq._fractional_matrix_power(A, t)", - "docstring": "Compute the fractional power of a matrix. Proceeds according to the discussion in section (6) of [1]_. Parameters ---------- A : (N, N) array_like Matrix whose fractional power to evaluate. t : float Fractional power. Returns ------- X : (N, N) array_like The fractional power of the matrix. References ---------- .. [1] Nicholas J. Higham and Lijing lin (2011) \"A Schur-Pade Algorithm for Fractional Powers of a Matrix.\" SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. ISSN 0895-4798 Examples -------- >>> import numpy as np >>> from scipy.linalg import fractional_matrix_power >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> b = fractional_matrix_power(a, 0.5) >>> b array([[ 0.75592895, 1.13389342], [ 0.37796447, 1.88982237]]) >>> np.dot(b, b) # Verify square root array([[ 1., 3.], [ 1., 4.]])", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_matfuncs.py", - "ast_data": "FunctionDef name:fractional_matrix_power arguments arg:A arg:t Call call:_apply_over_batch Assign Call call:_asarray_square Return return:yes" - }, - { - "library": "kornia", - "name": "RandomSharpness", - "source_code": "class RandomSharpness(IntensityAugmentationBase2D): def __init__(self, sharpness: Union[Tensor, float, Tuple[float, float]] = 0.5, same_on_batch: bool = False, p: float = 0.5, keepdim: bool = False) -> None: super().__init__(p = p, same_on_batch = same_on_batch, keepdim = keepdim) self._param_generator = rg.PlainUniformGenerator((sharpness, 'sharpness', 0.0, (0, float('inf')))) def apply_transform(self, input: Tensor, params: Dict[str, Tensor], flags: Dict[str, Any], transform: Optional[Tensor] = None) -> Tensor: factor = params['sharpness'] return sharpness(input, factor)", - "docstring": "Sharpen given tensor image or a batch of tensor images randomly. .. image:: _static/img/RandomSharpness.png Args: p: probability of applying the transformation. sharpness: factor of sharpness strength. Must be above 0. same_on_batch: apply the same transformation across the batch. keepdim: whether to keep the output shape the same as input (True) or broadcast it to the batch form (False). Shape: - Input: :math: or :math:, Optional: :math: - Output: :math: .. note:: This function internally uses :func:. 
Examples: >>> rng = torch.manual_seed(0) >>> input = torch.rand(1, 1, 5, 5) >>> sharpness = RandomSharpness(1., p=1.) >>> sharpness(input) tensor([[[[0.4963, 0.7682, 0.0885, 0.1320, 0.3074], [0.6341, 0.4810, 0.7367, 0.4177, 0.6323], [0.3489, 0.4428, 0.1562, 0.2443, 0.2939], [0.5185, 0.6462, 0.7050, 0.2288, 0.2823], [0.6816, 0.9152, 0.3971, 0.8742, 0.4194]]]]) To apply the exact augmenation again, you may take the advantage of the previous parameter state: >>> input = torch.randn(1, 3, 32, 32) >>> aug = RandomSharpness(1., p=1.) >>> (aug(input) == aug(input, params=aug._params)).all() tensor(True)", - "type": "class", - "file_path": "kornia\\kornia\\augmentation\\_2d\\intensity\\sharpness.py", - "ast_data": "ClassDef name:RandomSharpness FunctionDef name:__init__ arguments arg:self arg:sharpness type:Union[Tensor, float, Tuple[float, float]] arg:same_on_batch type:bool arg:p type:float arg:keepdim type:bool Assign Call call:PlainUniformGenerator FunctionDef name:apply_transform arguments arg:self arg:input type:Tensor arg:params type:Dict[str, Tensor] arg:flags type:Dict[str, Any] arg:transform type:Optional[Tensor] Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "patch_dynamo_config", - "source_code": "def patch_dynamo_config(arg1: Optional[Union[str, dict[str, Any], tuple[tuple[str, Any], ...]]] = None, arg2: Any = None, **kwargs: Any) -> DynamoConfigPatchProxy: if isinstance(arg1, tuple): arg1 = dict(arg1) config_patch = torch._dynamo.config.patch(arg1, arg2, **kwargs) _patch_dynamo_config_check(config_patch.changes) return DynamoConfigPatchProxy(config_patch)", - "docstring": "A wrapper around torch._dynamo.config.patch that can be traced by Dynamo to temporarily change config values DURING tracing. See _allowed_config_patches for the list of allowed config patches. Arguments are the same as with torch._dynamo.confing.patch. Can be used as a decorator or a context manager. User code SHOULD NOT MODIFY the return value of this function. WARNING: changing Dynamo config during tracing can lead to unpredictable tracing behavior! Proceed only as advised!", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\decorators.py", - "ast_data": "FunctionDef name:patch_dynamo_config arguments arg:arg1 type:Optional[Union[str, dict[str, Any], tuple[tuple[str, Any], ...]]] arg:arg2 type:Any kwarg:kwargs If Call call:isinstance Assign Call call:dict Assign Call call:patch Return return:yes" - }, - { - "library": "pytorch", - "name": "prod", - "source_code": "def prod(xs: Sequence[NumberType]) -> NumberType: return reduce(operator.mul, xs, 1)", - "docstring": "Product of elements in input sequence. Returns 1 for empty sequence", - "type": "function", - "file_path": "pytorch\\torch\\_prims_common\\__init__.py", - "ast_data": "FunctionDef name:prod arguments arg:xs type:Sequence[NumberType] Return return:yes" - }, - { - "library": "pytorch", - "name": "pg_group_ranks", - "source_code": "@property def pg_group_ranks(self) -> dict[ProcessGroup, dict[int, int]]: global _pg_group_ranks return _pg_group_ranks", - "docstring": "Process group's global rank to local rank mapping. 
TODO don't expose the map, expose fine grained ops", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:pg_group_ranks arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "normalize_module", - "source_code": "@compatibility(is_backward_compatible = False) def normalize_module(root: torch.nn.Module, target: str, args: tuple[Any], kwargs: Optional[dict[str, Any]] = None, normalize_to_only_use_kwargs: bool = False) -> Optional[ArgsKwargsPair]: try: submod = root.get_submodule(target) except AttributeError as e: raise RuntimeError(f'Tried to normalize node with target {target} but root did not have that target!') from e if hasattr(submod.__class__, '__name__'): classname = submod.__class__.__name__ if getattr(torch.nn, classname, None) = = submod.__class__: sig = inspect.signature(inspect.unwrap(submod.forward)) if kwargs is None: kwargs = {} new_args_and_kwargs = _args_kwargs_to_normalized_args_kwargs(sig, args, kwargs, normalize_to_only_use_kwargs) return new_args_and_kwargs return None", - "docstring": "Returns normalized arguments to PyTorch modules. This means that will be matched up to the functional's signature and return exclusively kwargs in positional order if is True. Also populates default values. Does not support positional-only parameters or varargs parameters (*args, **kwargs). Args: root (nn.Module): root module upon which we query modules target (Callable): Function that we are normalizing args (Tuple[Any]): Tuple of args to the function kwargs (Optional[Dict[str, Any]]): Dict of kwargs to the function normalize_to_only_use_kwargs (bool): Whether to normalize to only use kwargs. Returns: Returns normalized_args_and_kwargs, or if not successful.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\operator_schemas.py", - "ast_data": "FunctionDef name:normalize_module arguments arg:root type:torch.nn.Module arg:target type:str arg:args type:tuple[Any] arg:kwargs type:Optional[dict[str, Any]] arg:normalize_to_only_use_kwargs type:bool Call call:compatibility Try Assign Call call:get_submodule ExceptHandler Raise raises:RuntimeError(f'Tried to normalize node with target {target} but root did not have that target!') If Call call:hasattr Assign If Compare op:Eq Assign Call call:signature If Compare op:Is Assign Assign Call call:_args_kwargs_to_normalized_args_kwargs Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "items", - "source_code": "def items(self): return Mapping.items(self)", - "docstring": "D.items() returns a set-like object providing a view on the items", - "type": "method", - "file_path": "numpy\\numpy\\lib\\_npyio_impl.py", - "ast_data": "FunctionDef name:items arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "forward", - "source_code": "def forward(self) -> Tensor: rot = self.scale * angle_to_rotation_matrix(self.rot) out = convert_affinematrix_to_homography(torch.cat([rot, self.shift], dim = 2)) return out", - "docstring": "Single-batch similarity transform\". 
Returns: Similarity with shape :math:", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\transform\\image_registrator.py", - "ast_data": "FunctionDef name:forward arguments arg:self Assign Assign Call call:convert_affinematrix_to_homography Return return:yes" - }, - { - "library": "mongo", - "name": "write_command", - "source_code": "def write_command(self, request_id: int, msg: bytes, codec_options: CodecOptions) -> dict[str, Any]: self.send_message(msg, 0) reply = self.receive_message(request_id) result = reply.command_response(codec_options) helpers_shared._check_command_response(result, self.max_wire_version) return result", - "docstring": "Send \"insert\" etc. command, returning response as a dict. Can raise ConnectionFailure or OperationFailure. :param request_id: an int. :param msg: bytes, the command message.", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\pool.py", - "ast_data": "FunctionDef name:write_command arguments arg:self arg:request_id type:int arg:msg type:bytes arg:codec_options type:CodecOptions Assign Call call:receive_message Assign Call call:command_response Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_legend", - "source_code": "def get_legend(self): return self.legend_", - "docstring": "Return the instance, or None if no legend is defined.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:get_legend arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_params", - "source_code": "def set_params(self, **kwargs): _api.warn_external(\"'set_params()' not defined for locator of type \" + str(type(self)))", - "docstring": "Do nothing, and raise a warning. Any locator class not supporting the set_params() function will call this.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:set_params arguments arg:self kwarg:kwargs" - }, - { - "library": "feincms", - "name": "are_ancestors_active", - "source_code": "def are_ancestors_active(self): if self.is_root_node(): return True queryset = PageManager.apply_active_filters(self.get_ancestors()) return queryset.count() > = self.level", - "docstring": "Check whether all ancestors of this page are active", - "type": "method", - "file_path": "feincms\\feincms\\module\\page\\models.py", - "ast_data": "FunctionDef name:are_ancestors_active arguments arg:self If Call call:is_root_node Return return:yes Assign Call call:apply_active_filters Return return:yes" - }, - { - "library": "django", - "name": "__init__", - "source_code": "def __init__(self, type_input): if isinstance(type_input, OGRGeomType): num = type_input.num elif isinstance(type_input, str): type_input = type_input.lower() if type_input = = 'geometry': type_input = 'unknown' num = self._str_types.get(type_input) if num is None: raise GDALException('Invalid OGR String Type \"%s\"' % type_input) elif isinstance(type_input, int): if type_input not in self._types: raise GDALException('Invalid OGR Integer Type: %d' % type_input) num = type_input else: raise TypeError('Invalid OGR input type given.') self.num = num", - "docstring": "Figure out the correct OGR Type based upon the input.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geomtype.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:type_input If Call call:isinstance Assign If Call call:isinstance Assign Call call:lower If Compare op:Eq Assign Assign Call call:get 
If Compare op:Is Raise raises:GDALException('Invalid OGR String Type \"%s\"' % type_input) If Call call:isinstance If Compare op:NotIn Raise raises:GDALException('Invalid OGR Integer Type: %d' % type_input) Assign Raise raises:TypeError('Invalid OGR input type given.') Assign" - }, - { - "library": "tensorflow", - "name": "scale_loss_for_distribution", - "source_code": "def scale_loss_for_distribution(loss_value): num_replicas = distribute_lib.get_strategy().num_replicas_in_sync if num_replicas > 1: loss_value * = 1.0 / num_replicas return loss_value", - "docstring": "Scales and returns the given loss value by the number of replicas.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\losses_utils.py", - "ast_data": "FunctionDef name:scale_loss_for_distribution arguments arg:loss_value Assign If Compare op:Gt Return return:yes" - }, - { - "library": "tensorflow", - "name": "categorical_accuracy", - "source_code": "@dispatch.add_dispatch_support def categorical_accuracy(y_true, y_pred): return math_ops.cast(math_ops.equal(math_ops.argmax(y_true, axis = -1), math_ops.argmax(y_pred, axis = -1)), backend.floatx())", - "docstring": "Calculates how often predictions match one-hot labels. Standalone usage: >>> y_true = [[0, 0, 1], [0, 1, 0]] >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]] >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred) >>> assert m.shape == (2,) >>> m.numpy() array([0., 1.], dtype=float32) You can provide logits of classes as , since argmax of logits and probabilities are same. Args: y_true: One-hot ground truth values. y_pred: The prediction values. Returns: Categorical accuracy values.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", - "ast_data": "FunctionDef name:categorical_accuracy arguments arg:y_true arg:y_pred Return return:yes" - }, - { - "library": "matplotlib", - "name": "PatchCollection", - "source_code": "class PatchCollection(Collection): def __init__(self, patches, *, match_original = False, **kwargs): if match_original: def determine_facecolor(patch): if patch.get_fill(): return patch.get_facecolor() return [0, 0, 0, 0] kwargs['facecolors'] = [determine_facecolor(p) for p in patches] kwargs['edgecolors'] = [p.get_edgecolor() for p in patches] kwargs['linewidths'] = [p.get_linewidth() for p in patches] kwargs['linestyles'] = [p.get_linestyle() for p in patches] kwargs['antialiaseds'] = [p.get_antialiased() for p in patches] super().__init__(**kwargs) self.set_paths(patches) def set_paths(self, patches): paths = [p.get_transform().transform_path(p.get_path()) for p in patches] self._paths = paths", - "docstring": "A generic collection of patches. PatchCollection draws faster than a large number of equivalent individual Patches. 
It also makes it easier to assign a colormap to a heterogeneous collection of patches.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "ClassDef name:PatchCollection FunctionDef name:__init__ arguments arg:self arg:patches kwarg:kwargs If FunctionDef name:determine_facecolor arguments arg:patch If Call call:get_fill Return return:yes Return return:yes Assign Assign Assign Assign Assign FunctionDef name:set_paths arguments arg:self arg:patches Assign Assign" - }, - { - "library": "pytorch", - "name": "LazyConv2d", - "source_code": "class LazyConv2d(_LazyConvXdMixin, Conv2d): cls_to_become = Conv2d def __init__(self, out_channels: int, kernel_size: _size_2_t, stride: _size_2_t = 1, padding: _size_2_t = 0, dilation: _size_2_t = 1, groups: int = 1, bias: bool = True, padding_mode: str = 'zeros', device = None, dtype = None) -> None: factory_kwargs = {'device': device, 'dtype': dtype} super().__init__(0, 0, kernel_size, stride, padding, dilation, groups, False, padding_mode, **factory_kwargs) self.weight = UninitializedParameter(**factory_kwargs) self.out_channels = out_channels if bias: self.bias = UninitializedParameter(**factory_kwargs) def _get_num_spatial_dims(self) -> int: return 2", - "docstring": "A :class: module with lazy initialization of the `Conv2dweightbiastorch.nn.modules.lazy.LazyModuleMixintorch.nn.Conv2dtorch.nn.modules.lazy.LazyModuleMixin`", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\conv.py", - "ast_data": "ClassDef name:LazyConv2d Assign FunctionDef name:__init__ arguments arg:self arg:out_channels type:int arg:kernel_size type:_size_2_t arg:stride type:_size_2_t arg:padding type:_size_2_t arg:dilation type:_size_2_t arg:groups type:int arg:bias type:bool arg:padding_mode type:str arg:device arg:dtype Assign Assign Call call:UninitializedParameter Assign If Assign Call call:UninitializedParameter FunctionDef name:_get_num_spatial_dims arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "degree", - "source_code": "@property def degree(self): return self._N - 1", - "docstring": "Degree of the polynomial. One less the number of control points.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\bezier.py", - "ast_data": "FunctionDef name:degree arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, name, distribution_fn, required_gpus = None, required_physical_gpus = 0, required_tpu = False, use_cloud_tpu = False, has_chief = False, num_workers = 1, num_ps = 0, share_gpu = True, pool_runner_fn = None, no_xla = False): object.__init__(self) self._name = name self._distribution_fn = distribution_fn self.required_gpus = required_gpus self.required_physical_gpus = required_physical_gpus self.required_tpu = required_tpu self.use_cloud_tpu = use_cloud_tpu self.has_chief = has_chief self.num_workers = num_workers self.num_ps = num_ps self.share_gpu = share_gpu self._pool_runner_fn = pool_runner_fn self.no_xla = no_xla", - "docstring": "Initialize NamedDistribution. Args: name: Name that will be a part of the name of the test case. distribution_fn: A callable that creates a . required_gpus: The number of GPUs that the strategy requires. Only one of and should be set. required_physical_gpus: Number of physical GPUs required. Only one of and should be set. required_tpu: Whether the strategy requires TPU. use_cloud_tpu: Whether the strategy requires cloud TPU. 
has_chief: Whether the strategy requires a chief worker. num_workers: The number of workers that the strategy requires. num_ps: The number of parameter servers. share_gpu: Whether to share GPUs among workers. pool_runner_fn: An optional callable that returns a MultiProcessPoolRunner to run the test. no_xla: Whether to skip in XLA tests.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\combinations.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:name arg:distribution_fn arg:required_gpus arg:required_physical_gpus arg:required_tpu arg:use_cloud_tpu arg:has_chief arg:num_workers arg:num_ps arg:share_gpu arg:pool_runner_fn arg:no_xla Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "pandas", - "name": "convert_to_line_delimits", - "source_code": "def convert_to_line_delimits(s: str) -> str: if not s[0] = = '[' and s[-1] = = ']': return s s = s[1: -1] return convert_json_to_lines(s)", - "docstring": "Helper function that converts JSON lists to line delimited JSON.", - "type": "function", - "file_path": "pandas\\pandas\\io\\json\\_normalize.py", - "ast_data": "FunctionDef name:convert_to_line_delimits arguments arg:s type:str If BoolOp Compare op:Eq Return return:yes Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "build_shuffle_all_reduce", - "source_code": "def build_shuffle_all_reduce(input_tensors, gather_devices, red_op, un_op = None): input_tensors, shape = _flatten_tensors(input_tensors) dst_devices = [t.device for t in input_tensors] reduced_shards = _build_shuffle_gather(input_tensors, gather_devices, red_op, un_op) output_tensors = _build_shuffle_scatter(reduced_shards, dst_devices) if len(shape) ! = 1: output_tensors = _reshape_tensors(output_tensors, shape) return output_tensors", - "docstring": "Construct a subgraph for shuffle all-reduce. Shuffle reduce is essentially the algorithm implemented when using parameter servers. Suppose tensor length is n, there are d devices and g gather shards. Each device sends a n/g length sub-tensor to each gather shard. The gather shards perform a reduction across d fragments, then broadcast the result back to each device. The devices then join the g fully reduced fragments they receive from the shards. The gather shards could perform d-1 pairwise reductions, or one d-way reduction. The first is better where reduction Op time is low compared to transmission time, the second better in the other case. Args: input_tensors: list of values to be reduced. gather_devices: list of names of devices on which reduction shards should be placed. red_op: an n-array elementwise reduction Op un_op: optional elementwise unary Op to be applied to fully-reduced values. Returns: list of which are the fully reduced tensors.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\v1\\all_reduce.py", - "ast_data": "FunctionDef name:build_shuffle_all_reduce arguments arg:input_tensors arg:gather_devices arg:red_op arg:un_op Assign Call call:_flatten_tensors Assign Assign Call call:_build_shuffle_gather Assign Call call:_build_shuffle_scatter If Compare op:NotEq Assign Call call:_reshape_tensors Return return:yes" - }, - { - "library": "django", - "name": "disable_action", - "source_code": "def disable_action(self, name): del self._actions[name]", - "docstring": "Disable a globally-registered action. 
Raise KeyError for invalid names.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\sites.py", - "ast_data": "FunctionDef name:disable_action arguments arg:self arg:name" - }, - { - "library": "tensorflow", - "name": "on_test_end", - "source_code": "def on_test_end(self, logs = None): logs = self._process_logs(logs) for callback in self.callbacks: callback.on_test_end(logs)", - "docstring": "Calls the methods of its callbacks. Args: logs: Dict. Currently no data is passed to this argument for this method but that may change in the future.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks.py", - "ast_data": "FunctionDef name:on_test_end arguments arg:self arg:logs Assign Call call:_process_logs For" - }, - { - "library": "tensorflow", - "name": "op", - "source_code": "@property def op(self) -> ops.Operation: return self.values.op", - "docstring": "The that produces as an output.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\indexed_slices.py", - "ast_data": "FunctionDef name:op arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "__getstate__", - "source_code": "def __getstate__(self): state = self.__dict__ if MapDataPipe.getstate_hook is not None: return MapDataPipe.getstate_hook(state) return state", - "docstring": "Serialize functions when is available. If this doesn't cover your custom DataPipe's use case, consider writing custom methods for and , or use for serialization.", - "type": "method", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\datapipe.py", - "ast_data": "FunctionDef name:__getstate__ arguments arg:self Assign If Compare op:IsNot Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "value_labels", - "source_code": "def value_labels(self) -> dict[str, dict[int, str]]: if not self._value_labels_read: self._read_value_labels() return self._value_label_dict", - "docstring": "Return a nested dict associating each variable name to its value and label. This method retrieves the value labels from a Stata file. Value labels are mappings between the coded values and their corresponding descriptive labels in a Stata dataset. Returns ------- dict A python dictionary. See Also -------- read_stata : Read Stata file into DataFrame. DataFrame.to_stata : Export DataFrame object to Stata dta format. Examples -------- >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=[\"col_1\", \"col_2\"]) >>> time_stamp = pd.Timestamp(2000, 2, 29, 14, 21) >>> path = \"/My_path/filename.dta\" >>> value_labels = {\"col_1\": {3: \"x\"}} >>> df.to_stata( ... path, ... time_stamp=time_stamp, # doctest: +SKIP ... value_labels=value_labels, ... version=None, ... ) # doctest: +SKIP >>> with pd.io.stata.StataReader(path) as reader: # doctest: +SKIP ... print(reader.value_labels()) # doctest: +SKIP {'col_1': {3: 'x'}} >>> pd.read_stata(path) # doctest: +SKIP index col_1 col_2 0 0 1 2 1 1 x 4", - "type": "method", - "file_path": "pandas\\pandas\\io\\stata.py", - "ast_data": "FunctionDef name:value_labels arguments arg:self If Return return:yes" - }, - { - "library": "tensorflow", - "name": "VariablePolicy", - "source_code": "class VariablePolicy(object): def __init__(self, aggregation): self._aggregation = aggregation def value(self): raise NotImplementedError(f'VariablePolicy.value should be overridden by sub-classes. Type name is {type(self)}') def _is_mirrored(self): raise NotImplementedError(f'VariablePolicy._is_mirrored should be overridden by sub-classes. 
Type name is {type(self)}') def _as_graph_element(self, _): raise NotImplementedError(f'VariablePolicy._as_graph_element should be overridden by sub-classes. Type name is {type(self)}') def _get_cross_replica(self, var): raise NotImplementedError(f'VariablePolicy._get_cross_replica should be overridden by sub-classes. Type name is {type(self)}') def _update_replica(self, var, update_fn, value, **kwargs): raise NotImplementedError(f'VariablePolicy._update_replica should be overridden by sub-classes. Type name is {type(self)}')", - "docstring": "Policy defining synchronization and aggregation of a distributed variable. Given and parameters set on a during variable creation within scope, creates an appropriate policy object and assigns it to the distributed variable. All variable operations are delegated to the respective policy object.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", - "ast_data": "ClassDef name:VariablePolicy FunctionDef name:__init__ arguments arg:self arg:aggregation Assign FunctionDef name:value arguments arg:self Raise raises:NotImplementedError(f'VariablePolicy.value should be overridden by sub-classes. Type name is {type(self)}') FunctionDef name:_is_mirrored arguments arg:self Raise raises:NotImplementedError(f'VariablePolicy._is_mirrored should be overridden by sub-classes. Type name is {type(self)}') FunctionDef name:_as_graph_element arguments arg:self arg:_ Raise raises:NotImplementedError(f'VariablePolicy._as_graph_element should be overridden by sub-classes. Type name is {type(self)}') FunctionDef name:_get_cross_replica arguments arg:self arg:var Raise raises:NotImplementedError(f'VariablePolicy._get_cross_replica should be overridden by sub-classes. Type name is {type(self)}') FunctionDef name:_update_replica arguments arg:self arg:var arg:update_fn arg:value kwarg:kwargs Raise raises:NotImplementedError(f'VariablePolicy._update_replica should be overridden by sub-classes. Type name is {type(self)}')" - }, - { - "library": "scikit-learn", - "name": "compute_partial_dependence", - "source_code": "def compute_partial_dependence(self, grid, target_features, out): _compute_partial_dependence(self.nodes, grid, target_features, out)", - "docstring": "Fast partial dependence computation. Parameters ---------- grid : ndarray, shape (n_samples, n_target_features) The grid points on which the partial dependence should be evaluated. target_features : ndarray, shape (n_target_features) The set of target features for which the partial dependence should be evaluated. 
out : ndarray, shape (n_samples) The value of the partial dependence function on each grid point.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\ensemble\\_hist_gradient_boosting\\predictor.py", - "ast_data": "FunctionDef name:compute_partial_dependence arguments arg:self arg:grid arg:target_features arg:out" - }, - { - "library": "tensorflow", - "name": "get_func_graph_output", - "source_code": "def get_func_graph_output(t): for output in tensor.graph.outputs: if output is t: return t identity_op = t.consumers()[0] if identity_op.type = = 'Identity' and any((identity_op.outputs[0] is t for t in tensor.graph.outputs)): return identity_op.outputs[0] return None", - "docstring": "Returns t or Identity(t) whichever exists in graph outputs else None.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\while_v2.py", - "ast_data": "FunctionDef name:get_func_graph_output arguments arg:t For If Compare op:Is Return return:yes Assign If BoolOp Compare op:Eq Call call:any Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "getmodule", - "source_code": "def getmodule(object): return _inspect.getmodule(object)", - "docstring": "TFDecorator-aware replacement for inspect.getmodule.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\tf_inspect.py", - "ast_data": "FunctionDef name:getmodule arguments arg:object Return return:yes" - }, - { - "library": "cryptography", - "name": "revocation_date_utc", - "source_code": "@property @abc.abstractmethod def revocation_date_utc(self) -> datetime.datetime: pass", - "docstring": "Returns the date of when this certificate was revoked as a non-naive UTC datetime.", - "type": "method", - "file_path": "cryptography\\src\\cryptography\\x509\\base.py", - "ast_data": "FunctionDef name:revocation_date_utc arguments arg:self" - }, - { - "library": "scipy", - "name": "pdf", - "source_code": "def pdf(self, x, df, scale): return np.exp(self.logpdf(x, df, scale))", - "docstring": "Inverse Wishart probability density function. Parameters ---------- x : array_like Quantiles, with the last axis of denoting the components. Each quantile must be a symmetric positive definite matrix. %(_doc_default_callparams)s Returns ------- pdf : ndarray Probability density function evaluated at Notes ----- %(_doc_callparams_note)s", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:pdf arguments arg:self arg:x arg:df arg:scale Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, role: str, rank: int, local_world_size: int): self.role = role self.rank = rank self.local_world_size = local_world_size", - "docstring": "Initialize the agent class instance. 
Args: role (str): user-defined role for the workers with this spec rank (int): the rank of the agent local_world_size (int): number of local workers to run", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\elastic\\agent\\server\\api.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:role type:str arg:rank type:int arg:local_world_size type:int Assign Assign Assign" - }, - { - "library": "pytorch", - "name": "image", - "source_code": "def image(tag, tensor, rescale = 1, dataformats = 'NCHW'): tensor = make_np(tensor) tensor = convert_to_HWC(tensor, dataformats) scale_factor = _calc_scale_factor(tensor) tensor = tensor.astype(np.float32) tensor = (tensor * scale_factor).clip(0, 255).astype(np.uint8) image = make_image(tensor, rescale = rescale) return Summary(value = [Summary.Value(tag = tag, image = image)])", - "docstring": "Output a protocol buffer with images. The summary has up to summary values containing images. The images are built from which must be 3-D with shape and where can be: * 1: is interpreted as Grayscale. * 3: is interpreted as RGB. * 4: is interpreted as RGBA. The in the outputted Summary.Value protobufs is generated based on the name, with a suffix depending on the max_outputs setting: * If is 1, the summary value tag is '*name*/image'. * If is greater than 1, the summary value tags are generated sequentially as '*name*/image/0', '*name*/image/1', etc. Args: tag: A name for the generated node. Will also serve as a series name in TensorBoard. tensor: A 3-D or of shape where is 1, 3, or 4. 'tensor' can either have values in [0, 1] (float32) or [0, 255] (uint8). The image() function will scale the image values to [0, 255] by applying a scale factor of either 1 (uint8) or 255 (float32). Out-of-range values will be clipped. Returns: A scalar of type . The serialized protocol buffer.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\tensorboard\\summary.py", - "ast_data": "FunctionDef name:image arguments arg:tag arg:tensor arg:rescale arg:dataformats Assign Call call:make_np Assign Call call:convert_to_HWC Assign Call call:_calc_scale_factor Assign Call call:astype Assign Call call:astype Assign Call call:make_image Return return:yes" - }, - { - "library": "pytorch", - "name": "get_chunk_sharding_params", - "source_code": "def get_chunk_sharding_params(sharding_dim_size, world_size, spec, rank): split_size = get_split_size(sharding_dim_size, world_size) current_offsets = 0 start_pos = current_offsets for idx, placement in enumerate(spec.placements): chunk_size = get_chunked_dim_size(sharding_dim_size, split_size, idx) if rank = = placement.rank(): start_pos = current_offsets break current_offsets + = chunk_size return (start_pos, chunk_size)", - "docstring": "Generate the start pos and offset length for the current rank for chunk sharding. Args: sharding_dim_size(int): The dimension length which we shard on. world_size(int): number of ranks. spec (:class:): sharding spec. rank(int): # of cuda process. Returns: start_pos(int): start position of sharded tensor on the given rank. 
chunk_size(int): chunk size of sharded tensor on the given rank.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py", - "ast_data": "FunctionDef name:get_chunk_sharding_params arguments arg:sharding_dim_size arg:world_size arg:spec arg:rank Assign Call call:get_split_size Assign Assign For Call call:enumerate Assign Call call:get_chunked_dim_size If Compare op:Eq Assign Return return:yes" - }, - { - "library": "numpy", - "name": "check_type", - "source_code": "def check_type(self, type_name, headers = None, include_dirs = None, library_dirs = None): self._check_compiler() body = textwrap.dedent('\\n int main(void) {\\n if ((%(name)s *) 0)\\n return 0;\\n if (sizeof (%(name)s))\\n return 0;\\n }\\n ') % {'name': type_name} st = False try: try: self._compile(body % {'type': type_name}, headers, include_dirs, 'c') st = True except distutils.errors.CompileError: st = False finally: self._clean() return st", - "docstring": "Check type availability. Return True if the type can be compiled, False otherwise", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\command\\config.py", - "ast_data": "FunctionDef name:check_type arguments arg:self arg:type_name arg:headers arg:include_dirs arg:library_dirs Assign Assign Try Try Assign ExceptHandler Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "log_loss", - "source_code": "@tf_export(v1 = ['losses.log_loss']) @dispatch.add_dispatch_support def log_loss(labels, predictions, weights = 1.0, epsilon = 1e-07, scope = None, loss_collection = ops.GraphKeys.LOSSES, reduction = Reduction.SUM_BY_NONZERO_WEIGHTS): if labels is None: raise ValueError('Argument `labels` must not be None.') if predictions is None: raise ValueError('Argument `predictions` must not be None.') with ops.name_scope(scope, 'log_loss', (predictions, labels, weights)) as scope: predictions = math_ops.cast(predictions, dtype = dtypes.float32) labels = math_ops.cast(labels, dtype = dtypes.float32) predictions.get_shape().assert_is_compatible_with(labels.get_shape()) losses = -math_ops.multiply(labels, math_ops.log(predictions + epsilon)) - math_ops.multiply(1 - labels, math_ops.log(1 - predictions + epsilon)) return compute_weighted_loss(losses, weights, scope, loss_collection, reduction = reduction)", - "docstring": "Adds a Log Loss term to the training procedure. acts as a coefficient for the loss. If a scalar is provided, then the loss is simply scaled by the given value. If is a tensor of size , then the total loss for each sample of the batch is rescaled by the corresponding element in the vector. If the shape of matches the shape of , then the loss of each measurable element of is scaled by the corresponding value of . Args: labels: The ground truth output tensor, same dimensions as 'predictions'. predictions: The predicted outputs. weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). epsilon: A small increment to add to avoid taking a log of zero. scope: The scope for the operations performed in computing the loss. loss_collection: collection to which the loss will be added. reduction: Type of reduction to apply to loss. Returns: Weighted loss float . If is , this has the same shape as ; otherwise, it is scalar. Raises: ValueError: If the shape of doesn't match that of or if the shape of is invalid. Also if or is None. 
@compatibility(eager) The argument is ignored when executing eagerly. Consider holding on to the return value or collecting losses via a . @end_compatibility", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\losses\\losses_impl.py", - "ast_data": "FunctionDef name:log_loss arguments arg:labels arg:predictions arg:weights arg:epsilon arg:scope arg:loss_collection arg:reduction Call call:tf_export If Compare op:Is Raise raises:ValueError('Argument `labels` must not be None.') If Compare op:Is Raise raises:ValueError('Argument `predictions` must not be None.') With Assign Call call:cast Assign Call call:cast Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "validate_non_overlapping_shards_metadata", - "source_code": "def validate_non_overlapping_shards_metadata(shards: list[ShardMetadata]): if not shards or len(shards) == 1: return sharded_dims: list[int] = [] for dim in range(len(shards[0].shard_offsets)): for i in range(1, len(shards)): if shards[i].shard_offsets[dim] != shards[0].shard_offsets[dim] or shards[i].shard_sizes[dim] != shards[0].shard_sizes[dim]: sharded_dims.append(dim) break pair: Optional[tuple[int, int]] = None if len(sharded_dims) == 0: all_zeros: bool = all((shard.shard_offsets == [0] * len(shards[0].shard_offsets) and math.prod(shard.shard_sizes) == 0 for shard in shards)) if all_zeros: return pair = (0, 1) elif len(sharded_dims) == 1: pair = _find_1d_overlapping_shards(shards, sharded_dims[0]) else: pair = _find_nd_overlapping_shards(shards, sharded_dims) if pair: raise ValueError(f'Shards {shards[pair[0]]} and {shards[pair[1]]} overlap')", - "docstring": "Ensures none of the shards overlap with each other. Args: shards(List[ShardMetadata]): List of :class: objects representing each shard. Raises: `` if there's overlap in any two shards.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharding_spec\\_internals.py", - "ast_data": "FunctionDef name:validate_non_overlapping_shards_metadata arguments arg:shards type:list[ShardMetadata] If BoolOp Compare op:Eq Return return:no For Call call:range For Call call:range If BoolOp Compare op:NotEq Compare op:NotEq If Compare op:Eq If Return return:no Assign If Compare op:Eq Assign Call call:_find_1d_overlapping_shards Assign Call call:_find_nd_overlapping_shards If Raise raises:ValueError(f'Shards {shards[pair[0]]} and {shards[pair[1]]} overlap')" - }, - { - "library": "scipy", - "name": "Bounds", - "source_code": "class Bounds: def _input_validation(self): try: res = np.broadcast_arrays(self.lb, self.ub, self.keep_feasible) self.lb, self.ub, self.keep_feasible = res except ValueError: message = '`lb`, `ub`, and `keep_feasible` must be broadcastable.' raise ValueError(message) def __init__(self, lb = -np.inf, ub = np.inf, keep_feasible = False): if issparse(lb) or issparse(ub): raise ValueError('Lower and upper bounds must be dense arrays.') self.lb = np.atleast_1d(lb) self.ub = np.atleast_1d(ub) if issparse(keep_feasible): raise ValueError('`keep_feasible` must be a dense array.') self.keep_feasible = np.atleast_1d(keep_feasible).astype(bool) self._input_validation() def __repr__(self): start = f'{type(self).__name__}({self.lb!r}, {self.ub!r}' if np.any(self.keep_feasible): end = f', keep_feasible = {self.keep_feasible!r})' else: end = ')' return start + end def residual(self, x): return (x - self.lb, self.ub - x)", - "docstring": "Bounds constraint on the variables.
The constraint has the general inequality form:: lb <= x <= ub It is possible to use equal bounds to represent an equality constraint or infinite bounds to represent a one-sided constraint. Parameters ---------- lb, ub : dense array_like, optional Lower and upper bounds on independent variables. , , and must be the same shape or broadcastable. Set components of and equal to fix a variable. Use `lbublbub`. Default is False. Has no effect for equality constraints.", - "type": "class", - "file_path": "scipy\\scipy\\optimize\\_constraints.py", - "ast_data": "ClassDef name:Bounds FunctionDef name:_input_validation arguments arg:self Try Assign Call call:broadcast_arrays Assign ExceptHandler Assign Raise raises:ValueError(message) FunctionDef name:__init__ arguments arg:self arg:lb arg:ub arg:keep_feasible If BoolOp Call call:issparse Call call:issparse Raise raises:ValueError('Lower and upper bounds must be dense arrays.') Assign Call call:atleast_1d Assign Call call:atleast_1d If Call call:issparse Raise raises:ValueError('`keep_feasible` must be a dense array.') Assign Call call:astype FunctionDef name:__repr__ arguments arg:self Assign If Call call:any Assign Assign Return return:yes FunctionDef name:residual arguments arg:self arg:x Return return:yes" - }, - { - "library": "tensorflow", - "name": "bessel_k0e", - "source_code": "@tf_export('math.special.bessel_k0e') @dispatch.register_unary_elementwise_api @dispatch.add_dispatch_support def bessel_k0e(x, name = None): with ops.name_scope(name, 'bessel_k0e', [x]): return gen_special_math_ops.bessel_k0e(x)", - "docstring": "Computes the Bessel k0e function of element-wise. Modified Bessel function of order 0. >>> tf.math.special.bessel_k0e([0.5, 1., 2., 4.]).numpy() array([1.52410939, 1.14446308, 0.84156822, 0.60929767], dtype=float32) Args: x: A or . Must be one of the following types: , , . name: A name for the operation (optional). Returns: A or , respectively. Has the same type as . @compatibility(scipy) Equivalent to scipy.special.k0e @end_compatibility", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\special_math_ops.py", - "ast_data": "FunctionDef name:bessel_k0e arguments arg:x arg:name Call call:tf_export With Return return:yes" - }, - { - "library": "pytorch", - "name": "is_bw", - "source_code": "@property def is_bw(self): return torch._C._current_graph_task_id() ! 
= -1", - "docstring": "A boolean marking if this is currently running during the backward pass or not", - "type": "method", - "file_path": "pytorch\\torch\\utils\\module_tracker.py", - "ast_data": "FunctionDef name:is_bw arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "check", - "source_code": "def check(self, against, using = DEFAULT_DB_ALIAS): from django.db.models import BooleanField, Value from django.db.models.functions import Coalesce from django.db.models.sql import Query from django.db.models.sql.constants import SINGLE query = Query(None) for name, value in against.items(): if not hasattr(value, 'resolve_expression'): value = Value(value) query.add_annotation(value, name, select = False) query.add_annotation(Value(1), '_check') connection = connections[using] if connection.features.supports_comparing_boolean_expr: query.add_q(Q(Coalesce(self, True, output_field = BooleanField()))) else: query.add_q(self) compiler = query.get_compiler(using = using) context_manager = transaction.atomic(using = using) if connection.in_atomic_block else nullcontext() try: with context_manager: return compiler.execute_sql(SINGLE) is not None except DatabaseError as e: logger.warning('Got a database error calling check() on %r: %s', self, e) return True", - "docstring": "Do a database query to check if the expressions of the Q instance matches against the expressions.", - "type": "method", - "file_path": "django\\django\\db\\models\\query_utils.py", - "ast_data": "FunctionDef name:check arguments arg:self arg:against arg:using Assign Call call:Query For Call call:items If Assign Call call:Value Assign If Assign Call call:get_compiler Assign Try With Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "django", - "name": "DjangoHelpFormatter", - "source_code": "class DjangoHelpFormatter(HelpFormatter): show_last = {'--version', '--verbosity', '--traceback', '--settings', '--pythonpath', '--no-color', '--force-color', '--skip-checks'} def _reordered_actions(self, actions): return sorted(actions, key = lambda a: set(a.option_strings) & self.show_last ! = set()) def add_usage(self, usage, actions, *args, **kwargs): super().add_usage(usage, self._reordered_actions(actions), *args, **kwargs) def add_arguments(self, actions): super().add_arguments(self._reordered_actions(actions))", - "docstring": "Customized formatter so that command-specific arguments appear in the --help output before arguments common to all commands.", - "type": "class", - "file_path": "django\\django\\core\\management\\base.py", - "ast_data": "ClassDef name:DjangoHelpFormatter Assign FunctionDef name:_reordered_actions arguments arg:self arg:actions Return return:yes FunctionDef name:add_usage arguments arg:self arg:usage arg:actions vararg:args kwarg:kwargs FunctionDef name:add_arguments arguments arg:self arg:actions" - }, - { - "library": "tensorflow", - "name": "log_device_compatibility_check", - "source_code": "def log_device_compatibility_check(policy_name): global _logged_compatibility_check if _logged_compatibility_check: return _logged_compatibility_check = True gpus = config.list_physical_devices('GPU') gpu_details_list = [config.get_device_details(g) for g in gpus] _log_device_compatibility_check(policy_name, gpu_details_list)", - "docstring": "Logs a compatibility check if the devices support the policy. Currently only logs for the policy mixed_float16. A log is shown only the first time this function is called. 
Args: policy_name: The name of the dtype policy.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\mixed_precision\\device_compatibility_check.py", - "ast_data": "FunctionDef name:log_device_compatibility_check arguments arg:policy_name If Return return:no Assign Assign Call call:list_physical_devices Assign" - }, - { - "library": "pytorch", - "name": "Conv2dBiasFollowedByBatchNorm2dPattern", - "source_code": "class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern): def __init__(self, prof: profile, should_benchmark: bool = False): super().__init__(prof, should_benchmark) self.name = 'Enabling Bias in Conv2d Followed By BatchNorm Pattern' self.description = \"Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d.\" self.url = 'https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#disable-bias-for-convolutions-directly-followed-by-a-batch-norm' @property def skip(self): return self.prof.record_shapes is False or super().skip def match(self, event: _ProfilerEvent): if event.name != 'aten::conv2d': return False if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] is None: return False event = self.go_up_until(event, lambda e: e.name.startswith('nn.Module: Conv2d')) if not event: return False event = self.next_of(event) if not event: return False return event.name.startswith('nn.Module: BatchNorm2d')", - "docstring": "This pattern identifies if we are enabling bias in Conv2d which is followed by BatchNorm2d. Bias doesn't do anything when followed by batchnorm. Pattern: nn.Module: Conv2d | nn.Module: BatchNorm2d ... aten::conv2d AND dtype of third argument is not null The third argument is the bias Algorithm: String match", - "type": "class", - "file_path": "pytorch\\torch\\profiler\\_pattern_matcher.py", - "ast_data": "ClassDef name:Conv2dBiasFollowedByBatchNorm2dPattern FunctionDef name:__init__ arguments arg:self arg:prof type:profile arg:should_benchmark type:bool Assign Assign Assign FunctionDef name:skip arguments arg:self Return return:yes FunctionDef name:match arguments arg:self arg:event type:_ProfilerEvent If Compare op:NotEq Return return:yes If BoolOp Compare op:Lt Compare op:Is Return return:yes Assign Call call:go_up_until If Return return:yes Assign Call call:next_of If Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "quote", - "source_code": "def quote(s): return s.translate(QUOTE_MAP) if isinstance(s, str) else s", - "docstring": "Ensure that primary key values do not confuse the admin URLs by escaping any '/', '_' and ':' and similarly problematic characters. Similar to urllib.parse.quote(), except that the quoting is slightly different so that it doesn't get automatically unquoted by the web browser.", - "type": "function", - "file_path": "django\\django\\contrib\\admin\\utils.py", - "ast_data": "FunctionDef name:quote arguments arg:s Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_custom_device", - "source_code": "def is_custom_device(self, device_name): self.ensure_initialized() return pywrap_tfe.TFE_Py_IsCustomDevice(self._handle, device_name)", - "docstring": "Calls TFE_IsCustomDevice.
See the non-member function.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:is_custom_device arguments arg:self arg:device_name Return return:yes" - }, - { - "library": "authlib", - "name": "validate_request_object_encryption_enc", - "source_code": "def validate_request_object_encryption_enc(self): if self.get('request_object_encryption_enc') and (not self.get('request_object_encryption_alg')): raise InvalidClaimError('request_object_encryption_enc') if self.get('request_object_encryption_alg'): self.setdefault('request_object_encryption_enc', 'A128CBC-HS256') self._validate_claim_value('request_object_encryption_enc')", - "docstring": "JWE enc algorithm [JWA] the RP is declaring that it may use for encrypting Request Objects sent to the OP. If request_object_encryption_alg is specified, the default request_object_encryption_enc value is A128CBC-HS256. When request_object_encryption_enc is included, request_object_encryption_alg MUST also be provided.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\registration\\claims.py", - "ast_data": "FunctionDef name:validate_request_object_encryption_enc arguments arg:self If BoolOp Call call:get Raise raises:InvalidClaimError('request_object_encryption_enc') If Call call:get" - }, - { - "library": "tensorflow", - "name": "update_state", - "source_code": "def update_state(self, y_true, y_pred, sample_weight = None): return metrics_utils.update_confusion_matrix_variables({metrics_utils.ConfusionMatrix.TRUE_POSITIVES: self.true_positives, metrics_utils.ConfusionMatrix.FALSE_POSITIVES: self.false_positives}, y_true, y_pred, thresholds = self.thresholds, thresholds_distributed_evenly = self._thresholds_distributed_evenly, top_k = self.top_k, class_id = self.class_id, sample_weight = sample_weight)", - "docstring": "Accumulates true positive and false positive statistics. Args: y_true: The ground truth values, with the same dimensions as . Will be cast to . y_pred: The predicted values. Each element must be in the range . sample_weight: Optional weighting of each example. Defaults to 1. Can be a whose rank is either 0, or the same rank as , and must be broadcastable to . Returns: Update op.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\metrics.py", - "ast_data": "FunctionDef name:update_state arguments arg:self arg:y_true arg:y_pred arg:sample_weight Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_markeredgewidth", - "source_code": "def set_markeredgewidth(self, ew): ew = mpl._val_or_rc(ew, 'lines.markeredgewidth') if self._markeredgewidth ! = ew: self.stale = True self._markeredgewidth = ew", - "docstring": "Set the marker edge width in points. Parameters ---------- ew : float Marker edge width, in points.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\lines.py", - "ast_data": "FunctionDef name:set_markeredgewidth arguments arg:self arg:ew Assign Call call:_val_or_rc If Compare op:NotEq Assign Assign" - }, - { - "library": "matplotlib", - "name": "open_group", - "source_code": "def open_group(self, s, gid = None): pass", - "docstring": "Open a grouping element with label *s* and *gid* (if set) as id. 
Only used by the SVG renderer.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:open_group arguments arg:self arg:s arg:gid" - }, - { - "library": "tensorflow", - "name": "graph_context_for_symbolic_tensors", - "source_code": "@tf_contextlib.contextmanager def graph_context_for_symbolic_tensors(*args, **kwargs): if any((is_symbolic_tensor(v) for v in list(args) + list(kwargs.values()))): with K.get_graph().as_default(): yield else: yield", - "docstring": "Returns graph context manager if any of the inputs is a symbolic tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\tf_utils.py", - "ast_data": "FunctionDef name:graph_context_for_symbolic_tensors arguments vararg:args kwarg:kwargs If Call call:any With" - }, - { - "library": "coconut", - "name": "assert_remove_prefix", - "source_code": "def assert_remove_prefix(inputstr, prefix, allow_no_prefix = False): if not allow_no_prefix: assert inputstr.startswith(prefix), inputstr elif not inputstr.startswith(prefix): return inputstr return inputstr[len(prefix):]", - "docstring": "Remove prefix asserting that inputstr starts with it.", - "type": "function", - "file_path": "coconut\\coconut\\util.py", - "ast_data": "FunctionDef name:assert_remove_prefix arguments arg:inputstr arg:prefix arg:allow_no_prefix If If Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "ty", - "source_code": "@property def ty(self) -> Tensor: return self.extrinsics[..., 1, -1]", - "docstring": "Return the y-coordinate of the translation vector. Returns: tensor of shape :math:.", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\camera\\pinhole.py", - "ast_data": "FunctionDef name:ty arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "execution_to_tensor_values", - "source_code": "def execution_to_tensor_values(self, execution): debug_event = self._reader.read_execution_event(execution.locator) return [_parse_tensor_value(tensor_proto) for tensor_proto in debug_event.execution.tensor_protos]", - "docstring": "Read the full tensor values from an Execution or ExecutionDigest. Args: execution: An or object. 
Returns: A list of numpy arrays representing the output tensor values of the execution event.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", - "ast_data": "FunctionDef name:execution_to_tensor_values arguments arg:self arg:execution Assign Call call:read_execution_event Return return:yes" - }, - { - "library": "algorithms", - "name": "rotate_v2", - "source_code": "def rotate_v2(array, k): array = array[:] def reverse(arr, a, b): while a < b: arr[a], arr[b] = (arr[b], arr[a]) a + = 1 b - = 1 n = len(array) k = k % n reverse(array, 0, n - k - 1) reverse(array, n - k, n - 1) reverse(array, 0, n - 1) return array", - "docstring": "Reverse segments of the array, followed by the entire array T(n)- O(n) :type array: List[int] :type k: int :rtype: void Do not return anything, modify nums in-place instead.", - "type": "function", - "file_path": "algorithms\\algorithms\\arrays\\rotate.py", - "ast_data": "FunctionDef name:rotate_v2 arguments arg:array arg:k Assign FunctionDef name:reverse arguments arg:arr arg:a arg:b While Compare op:Lt Assign Assign Call call:len Assign Return return:yes" - }, - { - "library": "flexx", - "name": "variables", - "source_code": "@property def variables(self): return self._provided_names", - "docstring": "The names of variables provided by this module. A name passed to add_variable, might not end up in this list if its imported into this module rather than defined here.", - "type": "method", - "file_path": "flexx\\flexx\\app\\_modules.py", - "ast_data": "FunctionDef name:variables arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "set_exception", - "source_code": "def set_exception(self, result: T) -> None: assert isinstance(result, Exception), f'{result} is of type {type(result)}, not an Exception.' def raise_error(fut_result): raise fut_result super()._set_unwrap_func(raise_error) self.set_result(result)", - "docstring": "Set an exception for this ``. Example:: >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_FUTURES) >>> fut = torch.futures.Future() >>> fut.set_exception(ValueError(\"foo\")) >>> fut.wait() Traceback (most recent call last): ... ValueError: foo", - "type": "method", - "file_path": "pytorch\\torch\\futures\\__init__.py", - "ast_data": "FunctionDef name:set_exception arguments arg:self arg:result type:T FunctionDef name:raise_error arguments arg:fut_result Raise raises:fut_result" - }, - { - "library": "tensorflow", - "name": "false_positives", - "source_code": "@tf_export(v1 = ['metrics.false_positives']) def false_positives(labels, predictions, weights = None, metrics_collections = None, updates_collections = None, name = None): if context.executing_eagerly(): raise RuntimeError('tf.metrics.false_positives is not supported when eager execution is enabled.') with variable_scope.variable_scope(name, 'false_positives', (predictions, labels, weights)): predictions, labels, weights = _remove_squeezable_dimensions(predictions = math_ops.cast(predictions, dtype = dtypes.bool), labels = math_ops.cast(labels, dtype = dtypes.bool), weights = weights) is_false_positive = math_ops.logical_and(math_ops.equal(labels, False), math_ops.equal(predictions, True)) return _count_condition(is_false_positive, weights, metrics_collections, updates_collections)", - "docstring": "Sum the weights of false positives. If is , weights default to 1. Use weights of 0 to mask values. Args: labels: The ground truth values, a whose dimensions must match . Will be cast to . 
predictions: The predicted values, a of arbitrary dimensions. Will be cast to . weights: Optional whose rank is either 0, or the same rank as , and must be broadcastable to (i.e., all dimensions must be either , or the same as the corresponding dimension). metrics_collections: An optional list of collections that the metric value variable should be added to. updates_collections: An optional list of collections that the metric update ops should be added to. name: An optional variable_scope name. Returns: value_tensor: A representing the current value of the metric. update_op: An operation that accumulates the error from a batch of data. Raises: ValueError: If and have mismatched shapes, or if is not and its shape doesn't match , or if either or are not a list or tuple. RuntimeError: If eager execution is enabled.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\metrics_impl.py", - "ast_data": "FunctionDef name:false_positives arguments arg:labels arg:predictions arg:weights arg:metrics_collections arg:updates_collections arg:name Call call:tf_export If Call call:executing_eagerly Raise raises:RuntimeError('tf.metrics.false_positives is not supported when eager execution is enabled.') With Assign Call call:_remove_squeezable_dimensions Assign Call call:logical_and Return return:yes" - }, - { - "library": "scrapy", - "name": "xpath", - "source_code": "def xpath(self, *a: Any, **kw: Any) -> SelectorList: raise NotSupported(\"Response content isn't text\")", - "docstring": "Shortcut method implemented only by responses whose content is text (subclasses of TextResponse).", - "type": "method", - "file_path": "scrapy\\scrapy\\http\\response\\__init__.py", - "ast_data": "FunctionDef name:xpath arguments arg:self vararg:a kwarg:kw Raise raises:NotSupported(\"Response content isn't text\")" - }, - { - "library": "authlib", - "name": "register_signature_method", - "source_code": "@classmethod def register_signature_method(cls, name, sign): cls.SIGNATURE_METHODS[name] = sign", - "docstring": "Extend client signature methods. :param name: A string to represent signature method. :param sign: A function to generate signature. The `` method accept 2 parameters:: def custom_sign_method(client, request): # client is the instance of Client. 
return \"your-signed-string\" Client.register_signature_method(\"custom-name\", custom_sign_method)", - "type": "method", - "file_path": "authlib\\authlib\\oauth1\\rfc5849\\client_auth.py", - "ast_data": "FunctionDef name:register_signature_method arguments arg:cls arg:name arg:sign Assign" - }, - { - "library": "pytorch", - "name": "maybe_disable_graph_partition", - "source_code": "def maybe_disable_graph_partition(cpp_wrapper: bool, aot_mode: bool) -> AbstractContextManager[None, None]: if cpp_wrapper or aot_mode: return config.patch(graph_partition = False) else: return contextlib.nullcontext()", - "docstring": "graph partition does not support cpp_wrapper and aot_mode yet.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\compile_fx.py", - "ast_data": "FunctionDef name:maybe_disable_graph_partition arguments arg:cpp_wrapper type:bool arg:aot_mode type:bool If BoolOp Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "__call__", - "source_code": "def __call__(self, value, clip = None): if clip is None: clip = self.clip xx, is_scalar = self.process_value(value) mask = np.ma.getmaskarray(xx) xx = np.atleast_1d(xx.filled(self.vmax + 1)) if clip: np.clip(xx, self.vmin, self.vmax, out = xx) max_col = self.Ncmap - 1 else: max_col = self.Ncmap iret = np.digitize(xx, self.boundaries) - 1 + self._offset if self.Ncmap > self._n_regions: if self._n_regions = = 1: iret[iret = = 0] = (self.Ncmap - 1) // 2 else: iret = (self.Ncmap - 1) / (self._n_regions - 1) * iret iret = iret.astype(np.int16) iret[xx < self.vmin] = -1 iret[xx > = self.vmax] = max_col ret = np.ma.array(iret, mask = mask) if is_scalar: ret = int(ret[0]) return ret", - "docstring": "This method behaves similarly to , except that it returns integers or arrays of int16.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:value arg:clip If Compare op:Is Assign Assign Call call:process_value Assign Call call:getmaskarray Assign Call call:atleast_1d If Assign Assign Assign If Compare op:Gt If Compare op:Eq Assign Assign Assign Call call:astype Assign Assign Assign Call call:array If Assign Call call:int Return return:yes" - }, - { - "library": "flexx", - "name": "minify", - "source_code": "def minify(code, remove_whitespace = False): code = remove_comments(code) if remove_whitespace: code = remove_all_whitespace(code) else: code = remove_trailing_whitespace(code) code = remove_empty_lines(code) code = tabbify(code) return code", - "docstring": "Very basic minification of JavaScript code. Will likely support more advanced minifcation in the future. Parameters: code (str) : the JavaScript code to minify. remove_whitespace (bool) : if True, removes all non-functional whitespace. Otherwise remove all trailing whitespace and indents using tabs to preserve space. Default False.", - "type": "function", - "file_path": "flexx\\flexx\\util\\minify.py", - "ast_data": "FunctionDef name:minify arguments arg:code arg:remove_whitespace Assign Call call:remove_comments If Assign Call call:remove_all_whitespace Assign Call call:remove_trailing_whitespace Assign Call call:remove_empty_lines Assign Call call:tabbify Return return:yes" - }, - { - "library": "algorithms", - "name": "__init__", - "source_code": "def __init__(self, capacity = 10): super().__init__() self._array = [None] * capacity self._front = 0 self._rear = 0", - "docstring": "Initialize python List with capacity of 10 or user given input. 
Python List type is a dynamic array, so we have to restrict its dynamic nature to make it work like a static array.", - "type": "method", - "file_path": "algorithms\\algorithms\\queues\\queue.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:capacity Assign Assign Assign" - }, - { - "library": "pandas", - "name": "nunique", - "source_code": "def nunique(self, dropna: bool = True) -> Series | DataFrame: ids = self._grouper.ids ngroups = self._grouper.ngroups val = self.obj._values codes, uniques = algorithms.factorize(val, use_na_sentinel = dropna, sort = False) if self._grouper.has_dropped_na: mask = ids > = 0 ids = ids[mask] codes = codes[mask] group_index = get_group_index(labels = [ids, codes], shape = (ngroups, len(uniques)), sort = False, xnull = dropna) if dropna: mask = group_index > = 0 if (~mask).any(): ids = ids[mask] group_index = group_index[mask] mask = duplicated(group_index, 'first') res = np.bincount(ids[~mask], minlength = ngroups) res = ensure_int64(res) ri = self._grouper.result_index result: Series | DataFrame = self.obj._constructor(res, index = ri, name = self.obj.name) if not self.as_index: result = self._insert_inaxis_grouper(result) result.index = default_index(len(result)) return result", - "docstring": "Return number of unique elements in the group. Parameters ---------- dropna : bool, default True Don't include NaN in the counts. Returns ------- Series Number of unique values within each group. See Also -------- core.resample.Resampler.nunique : Method nunique for Resampler. Examples -------- >>> lst = [\"a\", \"a\", \"b\", \"b\"] >>> ser = pd.Series([1, 2, 3, 3], index=lst) >>> ser a 1 a 2 b 3 b 3 dtype: int64 >>> ser.groupby(level=0).nunique() a 2 b 1 dtype: int64", - "type": "method", - "file_path": "pandas\\pandas\\core\\groupby\\generic.py", - "ast_data": "FunctionDef name:nunique arguments arg:self arg:dropna type:bool Assign Assign Assign Assign Call call:factorize If Assign Compare op:GtE Assign Assign Assign Call call:get_group_index If Assign Compare op:GtE If Call call:any Assign Assign Assign Call call:duplicated Assign Call call:bincount Assign Call call:ensure_int64 Assign If Assign Call call:_insert_inaxis_grouper Assign Call call:default_index Return return:yes" - }, - { - "library": "cherrypy", - "name": "validate_since", - "source_code": "def validate_since(): response = cherrypy.serving.response lastmod = response.headers.get('Last-Modified') if lastmod: status, reason, msg = _httputil.valid_status(response.status) request = cherrypy.serving.request since = request.headers.get('If-Unmodified-Since') if since and since ! = lastmod: if status > = 200 and status < = 299 or status = = 412: raise cherrypy.HTTPError(412) since = request.headers.get('If-Modified-Since') if since and since = = lastmod: if status > = 200 and status < = 299 or status = = 304: if request.method in ('GET', 'HEAD'): raise cherrypy.HTTPRedirect([], 304) else: raise cherrypy.HTTPError(412)", - "docstring": "Validate the current Last-Modified against If-Modified-Since headers. 
If no code has set the Last-Modified response header, then no validation will be performed.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\cptools.py", - "ast_data": "FunctionDef name:validate_since arguments Assign Assign Call call:get If Assign Call call:valid_status Assign Assign Call call:get If BoolOp Compare op:NotEq If BoolOp BoolOp Compare op:GtE Compare op:LtE Compare op:Eq Raise raises:cherrypy.HTTPError(412) Assign Call call:get If BoolOp Compare op:Eq If BoolOp BoolOp Compare op:GtE Compare op:LtE Compare op:Eq If Compare op:In Raise raises:cherrypy.HTTPRedirect([], 304) Raise raises:cherrypy.HTTPError(412)" - }, - { - "library": "django", - "name": "get_traceback_text", - "source_code": "def get_traceback_text(self): with self.text_template_path.open(encoding = 'utf-8') as fh: t = DEBUG_ENGINE.from_string(fh.read()) c = Context(self.get_traceback_data(), autoescape = False, use_l10n = False) return t.render(c)", - "docstring": "Return plain text version of debug 500 HTTP error page.", - "type": "method", - "file_path": "django\\django\\views\\debug.py", - "ast_data": "FunctionDef name:get_traceback_text arguments arg:self With Assign Call call:from_string Assign Call call:Context Return return:yes" - }, - { - "library": "tensorflow", - "name": "list_source", - "source_code": "def list_source(self, args, screen_info = None): del screen_info parsed = self._arg_parsers['list_source'].parse_args(args) source_list = source_utils.list_source_files_against_dump(self._debug_dump, path_regex_allowlist = parsed.path_filter, node_name_regex_allowlist = parsed.node_name_filter) top_lines = [RL('List of source files that created nodes in this run', 'bold')] if parsed.path_filter: top_lines.append(RL('File path regex filter: \"%s\"' % parsed.path_filter)) if parsed.node_name_filter: top_lines.append(RL('Node name regex filter: \"%s\"' % parsed.node_name_filter)) top_lines.append(RL()) output = debugger_cli_common.rich_text_lines_from_rich_line_list(top_lines) if not source_list: output.append('[No source file information.]') return output output.extend(self._make_source_table([item for item in source_list if not item[1]], False)) output.extend(self._make_source_table([item for item in source_list if item[1]], True)) _add_main_menu(output, node_name = None) return output", - "docstring": "List Python source files that constructed nodes and tensors.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py", - "ast_data": "FunctionDef name:list_source arguments arg:self arg:args arg:screen_info Assign Call call:parse_args Assign Call call:list_source_files_against_dump Assign If If Assign Call call:rich_text_lines_from_rich_line_list If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "parse_arguments", - "source_code": "def parse_arguments() -> argparse.Namespace: parser = argparse.ArgumentParser(description = \"Download and apply a Pull Request (PR) patch from the PyTorch GitHub repository to your local PyTorch installation.\\n\\nBest Practice: Since this script involves hot-patching PyTorch, it's recommended to use a disposable environment like a Docker container or a dedicated Python virtual environment (venv). 
This ensures that if the patching fails, you can easily recover by resetting the environment.\", epilog = 'Example: \\n python nightly_hotpatch.py 12345\\n python nightly_hotpatch.py 12345 --directory /path/to/pytorch --strip 1\\n\\nThese commands will download the patch for PR #12345 and apply it to your local PyTorch installation.', formatter_class = argparse.RawDescriptionHelpFormatter) parser.add_argument('PR_NUMBER', type = int, help = 'The number of the Pull Request (PR) from the PyTorch GitHub repository to download and apply as a patch.') parser.add_argument('--directory', '-d', type = str, default = None, help = 'Optional. Specify the target directory to apply the patch. If not provided, the script will use the PyTorch installation path.') parser.add_argument('--strip', '-p', type = int, default = 1, help = 'Optional. Specify the strip count to remove leading directories from file paths in the patch. Default is 1.') return parser.parse_args()", - "docstring": "Parses command-line arguments using argparse. Returns: argparse.Namespace: The parsed arguments containing the PR number, optional target directory, and strip count.", - "type": "function", - "file_path": "pytorch\\tools\\nightly_hotpatch.py", - "ast_data": "FunctionDef name:parse_arguments arguments Assign Call call:ArgumentParser Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_multilinebaseline", - "source_code": "def set_multilinebaseline(self, t): self._multilinebaseline = t self.stale = True", - "docstring": "Set multilinebaseline. If True, the baseline for multiline text is adjusted so that it is (approximately) center-aligned with single-line text. This is used e.g. by the legend implementation so that single-line labels are baseline-aligned, but multiline labels are \"center\"-aligned with them.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", - "ast_data": "FunctionDef name:set_multilinebaseline arguments arg:self arg:t Assign Assign" - }, - { - "library": "tensorflow", - "name": "create_edges", - "source_code": "def create_edges(self): raise NotImplementedError", - "docstring": "Calls add_outgoing_edge for all edges known to this Convertible. This is used to build the graph dependencies, so that conversion of variables to constants can be properly propagated through the graph. 
Usually this method will call add_outgoing_edge() to all the Convertible inputs.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\convert_to_constants.py", - "ast_data": "FunctionDef name:create_edges arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "django", - "name": "Message", - "source_code": "class Message: def __init__(self, level, message, extra_tags = None): self.level = int(level) self.message = message self.extra_tags = extra_tags def _prepare(self): self.message = str(self.message) self.extra_tags = str(self.extra_tags) if self.extra_tags is not None else None def __eq__(self, other): if not isinstance(other, Message): return NotImplemented return self.level = = other.level and self.message = = other.message def __str__(self): return str(self.message) def __repr__(self): extra_tags = f', extra_tags = {self.extra_tags!r}' if self.extra_tags else '' return f'Message(level = {self.level}, message = {self.message!r}{extra_tags})' @property def tags(self): return ' '.join((tag for tag in [self.extra_tags, self.level_tag] if tag)) @property def level_tag(self): return LEVEL_TAGS.get(self.level, '')", - "docstring": "Represent an actual message that can be stored in any of the supported storage classes (typically session- or cookie-based) and rendered in a view or template.", - "type": "class", - "file_path": "django\\django\\contrib\\messages\\storage\\base.py", - "ast_data": "ClassDef name:Message FunctionDef name:__init__ arguments arg:self arg:level arg:message arg:extra_tags Assign Call call:int Assign Assign FunctionDef name:_prepare arguments arg:self Assign Call call:str Assign FunctionDef name:__eq__ arguments arg:self arg:other If Return return:yes Return return:yes FunctionDef name:__str__ arguments arg:self Return return:yes FunctionDef name:__repr__ arguments arg:self Assign Return return:yes FunctionDef name:tags arguments arg:self Return return:yes FunctionDef name:level_tag arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "ss2tf", - "source_code": "def ss2tf(A, B, C, D, input = 0): A, B, C, D = abcd_normalize(A, B, C, D) nout, nin = D.shape if input > = nin: raise ValueError('System does not have the input specified.') B = B[:, input: input + 1] D = D[:, input: input + 1] try: den = poly(A) except ValueError: den = 1 if B.size = = 0 and C.size = = 0: num = np.ravel(D) if D.size = = 0 and A.size = = 0: den = [] return (num, den) num_states = A.shape[0] type_test = A[:, 0] + B[:, 0] + C[0, :] + D + 0.0 num = np.empty((nout, num_states + 1), type_test.dtype) for k in range(nout): Ck = atleast_2d(C[k, :]) num[k] = poly(A - dot(B, Ck)) + (D[k] - 1) * den return (num, den)", - "docstring": "State-space to transfer function. A, B, C, D defines a linear state-space system with inputs, outputs, and state variables. Parameters ---------- A : array_like State (or system) matrix of shape `numden` is a sequence representation of the denominator polynomial. Examples -------- Convert the state-space representation: .. math:: \\dot{\\textbf{x}}(t) = \\begin{bmatrix} -2 & -1 \\\\ 1 & 0 \\end{bmatrix} \\textbf{x}(t) + \\begin{bmatrix} 1 \\\\ 0 \\end{bmatrix} \\textbf{u}(t) \\\\ \\textbf{y}(t) = \\begin{bmatrix} 1 & 2 \\end{bmatrix} \\textbf{x}(t) + \\begin{bmatrix} 1 \\end{bmatrix} \\textbf{u}(t) >>> A = [[-2, -1], [1, 0]] >>> B = [[1], [0]] # 2-D column vector >>> C = [[1, 2]] # 2-D row vector >>> D = 1 to the transfer function: .. 
math:: H(s) = \\frac{s^2 + 3s + 3}{s^2 + 2s + 1} >>> from scipy.signal import ss2tf >>> ss2tf(A, B, C, D) (array([[1., 3., 3.]]), array([ 1., 2., 1.]))", - "type": "function", - "file_path": "scipy\\scipy\\signal\\_lti_conversion.py", - "ast_data": "FunctionDef name:ss2tf arguments arg:A arg:B arg:C arg:D arg:input Assign Call call:abcd_normalize Assign If Compare op:GtE Raise raises:ValueError('System does not have the input specified.') Assign Assign Try Assign Call call:poly ExceptHandler Assign If BoolOp Compare op:Eq Compare op:Eq Assign Call call:ravel If BoolOp Compare op:Eq Compare op:Eq Assign Return return:yes Assign Assign Assign Call call:empty For Call call:range Assign Call call:atleast_2d Assign Return return:yes" - }, - { - "library": "authlib", - "name": "get_amr", - "source_code": "def get_amr(self, user) -> Optional[list[str]]: return None", - "docstring": "Authentication Methods References. Defined by :ref: as an option list of user-defined case-sensitive strings indication which authentication methods have been used to authenticate the user. Developers MAY re-implement this method:: def get_amr(self, user): return [\"2FA\"] if user.has_2fa_enabled() else []", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc9068\\token.py", - "ast_data": "FunctionDef name:get_amr arguments arg:self arg:user Return return:yes" - }, - { - "library": "tensorflow", - "name": "row_limits", - "source_code": "def row_limits(self): return self._row_splits[1:]", - "docstring": "Returns the limit indices for rows in this row partition. These indices specify where the values for each row end. is equal to . Returns: A 1-D integer Tensor with shape . The returned tensor is nonnegative, and is sorted in ascending order. .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\row_partition.py", - "ast_data": "FunctionDef name:row_limits arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "__init__", - "source_code": "def __init__(self, default: Any, rebuild: _ConfigRebuild, valid_types: _OptValidTypes, description: str = '') -> None: super().__setattr__('default', default) super().__setattr__('rebuild', rebuild) super().__setattr__('valid_types', valid_types) super().__setattr__('description', description)", - "docstring": "Configuration option type for Sphinx. The type is intended to be immutable; changing the field values is an unsupported action. No validation is performed on the values, though consumers will likely expect them to be of the types advertised. The old tuple-based interface will be removed in Sphinx 9.", - "type": "method", - "file_path": "sphinx\\sphinx\\config.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:default type:Any arg:rebuild type:_ConfigRebuild arg:valid_types type:_OptValidTypes arg:description type:str" - }, - { - "library": "pytorch", - "name": "hardswish", - "source_code": "def hardswish(input: Tensor, scale: float, zero_point: int) -> Tensor: if not input.is_quantized: raise ValueError(\"Input to 'quantized.hardswish' must be quantized!\") return torch._ops.ops.quantized.hardswish(input, scale, zero_point)", - "docstring": "This is the quantized version of :func:. 
Args: input: quantized input scale: quantization scale of the output tensor zero_point: quantization zero point of the output tensor", - "type": "function", - "file_path": "pytorch\\torch\\ao\\nn\\quantized\\functional.py", - "ast_data": "FunctionDef name:hardswish arguments arg:input type:Tensor arg:scale type:float arg:zero_point type:int If Raise raises:ValueError(\"Input to 'quantized.hardswish' must be quantized!\") Return return:yes" - }, - { - "library": "pytorch", - "name": "set_reshard_after_forward", - "source_code": "def set_reshard_after_forward(self, reshard_after_forward: bool, recurse: bool = True) -> None: self_module = cast(nn.Module, self) modules = list(self_module.modules()) if recurse else [self_module] for module in modules: if isinstance(module, FSDPModule): state = module._get_fsdp_state() if (fsdp_param_group: = state._fsdp_param_group): fsdp_param_group.post_forward_mesh_info = _get_post_forward_mesh_info(reshard_after_forward, fsdp_param_group.mesh_info)", - "docstring": "Sets if the module should reshard parameters after forward. This can be used to change the `` for training. Args: reshard_after_forward (bool): Whether to reshard parameters after forward. recurse (bool): Whether to set for all FSDP submodules or just the passed-in module.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fully_shard.py", - "ast_data": "FunctionDef name:set_reshard_after_forward arguments arg:self arg:reshard_after_forward type:bool arg:recurse type:bool Assign Call call:cast Assign For If Call call:isinstance Assign Call call:_get_fsdp_state If Assign Call call:_get_post_forward_mesh_info" - }, - { - "library": "tensorflow", - "name": "LocalResourceRestoreContext", - "source_code": "class LocalResourceRestoreContext(object): def __init__(self, instance): self.instance = instance", - "docstring": "Class holding information of a distributed instance, e.g. StaticHashTable. Pairing use with context manager allows operations under this context manager to conveniently gets information of a component of the (and other restored distributed if we're supporting their distribution in the future), instead of looking it up from the mapping of the worker-to-resource handle. This is especially useful when we know which instance the operations should execute with and the mapping is not available yet.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\ps_values.py", - "ast_data": "ClassDef name:LocalResourceRestoreContext FunctionDef name:__init__ arguments arg:self arg:instance Assign" - }, - { - "library": "tensorflow", - "name": "read_meta_graph_file", - "source_code": "def read_meta_graph_file(filename): meta_graph_def = meta_graph_pb2.MetaGraphDef() if not file_io.file_exists(filename): raise IOError(f'File does not exist. Received: {filename}.') with file_io.FileIO(filename, 'rb') as f: file_content = f.read() try: meta_graph_def.ParseFromString(file_content) if sys.byteorder = = 'big': bst.swap_tensor_content_in_graph_function(meta_graph_def, 'little', 'big') return meta_graph_def except Exception: pass try: text_format.Merge(file_content.decode('utf-8'), meta_graph_def) if sys.byteorder = = 'big': bst.swap_tensor_content_in_graph_function(meta_graph_def, 'little', 'big') except text_format.ParseError as e: raise IOError(f'Cannot parse file {filename}: {str(e)}.') return meta_graph_def", - "docstring": "Reads a file containing and returns the protocol buffer. Args: filename: filename including the path. 
Returns: A protocol buffer. Raises: IOError: If the file doesn't exist, or cannot be successfully parsed.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\meta_graph.py", - "ast_data": "FunctionDef name:read_meta_graph_file arguments arg:filename Assign Call call:MetaGraphDef If Raise raises:IOError(f'File does not exist. Received: {filename}.') With Assign Call call:read Try If Compare op:Eq Return return:yes ExceptHandler Try If Compare op:Eq ExceptHandler Raise raises:IOError(f'Cannot parse file {filename}: {str(e)}.') Return return:yes" - }, - { - "library": "pandas", - "name": "quantile", - "source_code": "@final def quantile(self, q: float | list[float] | AnyArrayLike = 0.5, **kwargs): return self._downsample('quantile', q = q, **kwargs)", - "docstring": "Return value at the given quantile. Parameters ---------- q : float or array-like, default 0.5 (50% quantile) Returns ------- DataFrame or Series Quantile of values within each group. See Also -------- Series.quantile Return a series, where the index is q and the values are the quantiles. DataFrame.quantile Return a DataFrame, where the columns are the columns of self, and the values are the quantiles. DataFrameGroupBy.quantile Return a DataFrame, where the columns are groupby columns, and the values are its quantiles. Examples -------- >>> ser = pd.Series( ... [1, 3, 2, 4, 3, 8], ... index=pd.DatetimeIndex( ... [ ... \"2023-01-01\", ... \"2023-01-10\", ... \"2023-01-15\", ... \"2023-02-01\", ... \"2023-02-10\", ... \"2023-02-15\", ... ] ... ), ... ) >>> ser.resample(\"MS\").quantile() 2023-01-01 2.0 2023-02-01 4.0 Freq: MS, dtype: float64 >>> ser.resample(\"MS\").quantile(0.25) 2023-01-01 1.5 2023-02-01 3.5 Freq: MS, dtype: float64", - "type": "method", - "file_path": "pandas\\pandas\\core\\resample.py", - "ast_data": "FunctionDef name:quantile arguments arg:self arg:q type:float | list[float] | AnyArrayLike kwarg:kwargs Return return:yes" - }, - { - "library": "pytorch", - "name": "synchronize", - "source_code": "def synchronize(device: _device_t = None) -> None: _lazy_init() device = _get_device_index(device, optional = True) return torch._C._xpu_synchronize(device)", - "docstring": "Wait for all kernels in all streams on a XPU device to complete. Args: device (torch.device or int, optional): device for which to synchronize. It uses the current device, given by :func:, if :attr: is `` (default).", - "type": "function", - "file_path": "pytorch\\torch\\xpu\\__init__.py", - "ast_data": "FunctionDef name:synchronize arguments arg:device type:_device_t Assign Call call:_get_device_index Return return:yes" - }, - { - "library": "pytorch", - "name": "pick_loop_order", - "source_code": "def pick_loop_order(stride_lengths: list[list[int]], sizes: Sequence[sympy.Expr], priority_idx: tuple[int, ...] 
= ()) -> list[int]: @functools.cmp_to_key def index_cmp(a: int, b: int) -> int: if sizes[a] = = 1 or sizes[b] = = 1: return cmp(sizes[a] = = 1, sizes[b] = = 1) stride_len_a = [abs(sl[a]) for sl in stride_lengths] stride_len_b = [abs(sl[b]) for sl in stride_lengths] a_first = sum((sl_b = = 0 or sl_a < sl_b for sl_a, sl_b in zip(stride_len_a, stride_len_b))) b_first = sum((sl_a = = 0 or sl_b < sl_a for sl_a, sl_b in zip(stride_len_a, stride_len_b))) if a_first > b_first: return -1 if b_first > a_first: return 1 return cmp(b, a) order = list(reversed(range(len(stride_lengths[0])))) if len(priority_idx) > 0: stride_lengths = [stride_lengths[pi] for pi in priority_idx] if config.pick_loop_orders: order.sort(key = index_cmp) return order", - "docstring": "A heuristic to decide loop iteration orders. This has not been well tuned and may be something we should autotune.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\scheduler.py", - "ast_data": "FunctionDef name:pick_loop_order arguments arg:stride_lengths type:list[list[int]] arg:sizes type:Sequence[sympy.Expr] arg:priority_idx type:tuple[int, ...] FunctionDef name:index_cmp arguments arg:a type:int arg:b type:int If BoolOp Compare op:Eq Compare op:Eq Return return:yes Assign Assign Assign Call call:sum Assign Call call:sum If Compare op:Gt Return return:yes If Compare op:Gt Return return:yes Return return:yes Assign Call call:list If Compare op:Gt Assign If Return return:yes" - }, - { - "library": "scikit-learn", - "name": "transform", - "source_code": "@available_if(_search_estimator_has('transform')) def transform(self, X): check_is_fitted(self) return self.best_estimator_.transform(X)", - "docstring": "Call transform on the estimator with the best found parameters. Only available if the underlying estimator supports `X` transformed in the new space based on the estimator with the best found parameters.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_search.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:X Call call:available_if Return return:yes" - }, - { - "library": "kornia", - "name": "Rgb255ToNormals", - "source_code": "class Rgb255ToNormals(Module): def forward(self, image: Tensor) -> Tensor: return rgb255_to_normals(image)", - "docstring": "Convert an image from RGB [0, 255] to surface normals for visualization purposes. Returns: surface normals version of the image. Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> normals = Rgb255ToNormals() >>> output = normals(input) # 2x3x4x5", - "type": "class", - "file_path": "kornia\\kornia\\color\\rgb.py", - "ast_data": "ClassDef name:Rgb255ToNormals FunctionDef name:forward arguments arg:self arg:image type:Tensor Return return:yes" - }, - { - "library": "scipy", - "name": "lil_array", - "source_code": "class lil_array(_lil_base, sparray): pass", - "docstring": "Row-based LIst of Lists sparse array. This is a structure for constructing sparse arrays incrementally. Note that inserting a single item can take linear time in the worst case; to construct the array efficiently, make sure the items are pre-sorted by index, per row. This can be instantiated in several ways: lil_array(D) where D is a 2-D ndarray lil_array(S) with another sparse array or matrix S (equivalent to S.tolil()) lil_array((M, N), [dtype]) to construct an empty array with shape (M, N) dtype is optional, defaulting to dtype='d'. 
Attributes ---------- dtype : dtype Data type of the array shape : 2-tuple Shape of the array ndim : int Number of dimensions (this is always 2) nnz size data LIL format data array of the array rows LIL format row index array of the array T Notes ----- Sparse arrays can be used in arithmetic operations: they support addition, subtraction, multiplication, division, and matrix power. Advantages of the LIL format - supports flexible slicing - changes to the array sparsity structure are efficient Disadvantages of the LIL format - arithmetic operations LIL + LIL are slow (consider CSR or CSC) - slow column slicing (consider CSC) - slow matrix vector products (consider CSR or CSC) Intended Usage - LIL is a convenient format for constructing sparse arrays - once an array has been constructed, convert to CSR or CSC format for fast arithmetic and matrix vector operations - consider using the COO format when constructing large arrays Data Structure - An array (``.", - "type": "class", - "file_path": "scipy\\scipy\\sparse\\_lil.py", - "ast_data": "ClassDef name:lil_array" - }, - { - "library": "pytorch", - "name": "unique_consecutive", - "source_code": "def unique_consecutive(self, return_inverse = False, return_counts = False, dim = None): if has_torch_function_unary(self): return handle_torch_function(Tensor.unique_consecutive, (self,), self, return_inverse = return_inverse, return_counts = return_counts, dim = dim) return torch.unique_consecutive(self, return_inverse = return_inverse, return_counts = return_counts, dim = dim)", - "docstring": "Eliminates all but the first element from every consecutive group of equivalent elements. See :func:", - "type": "method", - "file_path": "pytorch\\torch\\_tensor.py", - "ast_data": "FunctionDef name:unique_consecutive arguments arg:self arg:return_inverse arg:return_counts arg:dim If Call call:has_torch_function_unary Return return:yes Return return:yes" - }, - { - "library": "pandas", - "name": "get_chunks", - "source_code": "def get_chunks(self, n_chunks: int | None = None): if n_chunks and n_chunks > 1: size = len(self._col) step = size // n_chunks if size % n_chunks ! = 0: step + = 1 for start in range(0, step * n_chunks, step): yield PandasColumn(self._col.iloc[start: start + step], self._allow_copy) else: yield self", - "docstring": "Return an iterator yielding the chunks. See for details on ``.", - "type": "method", - "file_path": "pandas\\pandas\\core\\interchange\\column.py", - "ast_data": "FunctionDef name:get_chunks arguments arg:self arg:n_chunks type:int | None If BoolOp Compare op:Gt Assign Call call:len Assign If Compare op:NotEq For Call call:range" - }, - { - "library": "pytorch", - "name": "TensorInfo", - "source_code": "@dataclass class TensorInfo: allocation_stack_trace: Optional[traceback.StackSummary] reads: list[Access] = field(default_factory = list) write: Optional[Access] = None", - "docstring": "Stores information about a single tensor and recent accesses to it. Args: allocation_stack_trace: the stack summary object captured during tensor allocation. Can be `` if the allocation wasn't caught by CSAN. reads: list of read accesses to the tensor that were performed since the last write. 
write: the last write access to the tensor.", - "type": "class", - "file_path": "pytorch\\torch\\cuda\\_sanitizer.py", - "ast_data": "ClassDef name:TensorInfo" - }, - { - "library": "pandas", - "name": "get_slice_bound", - "source_code": "def get_slice_bound(self, label, side: Literal['left', 'right']) -> int: if side not in ('left', 'right'): raise ValueError(f\"Invalid value for side kwarg, must be either 'left' or 'right': {side}\") original_label = label label = self._maybe_cast_slice_bound(label, side) try: slc = self.get_loc(label) except KeyError as err: try: return self._searchsorted_monotonic(label, side) except ValueError: raise err from None if isinstance(slc, np.ndarray): assert is_bool_dtype(slc.dtype) slc = lib.maybe_booleans_to_slice(slc.view('u1')) if isinstance(slc, np.ndarray): raise KeyError(f'Cannot get {side} slice bound for non-unique label: {original_label!r}') if isinstance(slc, slice): if side = = 'left': return slc.start else: return slc.stop elif side = = 'right': return slc + 1 else: return slc", - "docstring": "Calculate slice bound that corresponds to given label. Returns leftmost (one-past-the-rightmost if `` is non-unique in the index, an error will be raised. >>> idx_duplicate = pd.Index([\"a\", \"b\", \"a\", \"c\", \"d\"]) >>> idx_duplicate.get_slice_bound(\"a\", \"left\") Traceback (most recent call last): KeyError: Cannot get left slice bound for non-unique label: 'a'", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\base.py", - "ast_data": "FunctionDef name:get_slice_bound arguments arg:self arg:label arg:side type:Literal['left', 'right'] If Compare op:NotIn Raise raises:ValueError(f\"Invalid value for side kwarg, must be either 'left' or 'right': {side}\") Assign Assign Call call:_maybe_cast_slice_bound Try Assign Call call:get_loc ExceptHandler Try Return return:yes ExceptHandler Raise raises:err If Call call:isinstance Assign Call call:maybe_booleans_to_slice If Call call:isinstance Raise raises:KeyError(f'Cannot get {side} slice bound for non-unique label: {original_label!r}') If Call call:isinstance If Compare op:Eq Return return:yes Return return:yes If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "seaborn", - "name": "tick", - "source_code": "def tick(self, locator: Locator | None = None) -> Nominal: new = copy(self) new._tick_params = {'locator': locator} return new", - "docstring": "Configure the selection of ticks for the scale's axis or legend. .. note:: This API is under construction and will be enhanced over time. At the moment, it is probably not very useful. Parameters ---------- locator : :class: subclass Pre-configured matplotlib locator; other parameters will not be used. 
Returns ------- Copy of self with new tick configuration.", - "type": "method", - "file_path": "seaborn\\seaborn\\_core\\scales.py", - "ast_data": "FunctionDef name:tick arguments arg:self arg:locator type:Locator | None Assign Call call:copy Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "subplots_adjust", - "source_code": "def subplots_adjust(self, left = None, bottom = None, right = None, top = None, wspace = None, hspace = None): if self.get_layout_engine() is not None and (not self.get_layout_engine().adjust_compatible): _api.warn_external('This figure was using a layout engine that is incompatible with subplots_adjust and/or tight_layout; not calling subplots_adjust.') return self.subplotpars.update(left, bottom, right, top, wspace, hspace) for ax in self.axes: if ax.get_subplotspec() is not None: ax._set_position(ax.get_subplotspec().get_position(self)) self.stale = True", - "docstring": "Adjust the subplot layout parameters. Unset parameters are left unmodified; initial values are given by :rc:. .. plot:: _embedded_plots/figure_subplots_adjust.py Parameters ---------- left : float, optional The position of the left edge of the subplots, as a fraction of the figure width. right : float, optional The position of the right edge of the subplots, as a fraction of the figure width. bottom : float, optional The position of the bottom edge of the subplots, as a fraction of the figure height. top : float, optional The position of the top edge of the subplots, as a fraction of the figure height. wspace : float, optional The width of the padding between subplots, as a fraction of the average Axes width. hspace : float, optional The height of the padding between subplots, as a fraction of the average Axes height.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\figure.py", - "ast_data": "FunctionDef name:subplots_adjust arguments arg:self arg:left arg:bottom arg:right arg:top arg:wspace arg:hspace If BoolOp Compare op:IsNot Return return:no For If Compare op:IsNot Assign" - }, - { - "library": "scikit-learn", - "name": "ensure_common_namespace_device", - "source_code": "def ensure_common_namespace_device(reference, *arrays): xp, is_array_api = get_namespace(reference) if is_array_api: device_ = device(reference) return [xp.asarray(a, device = device_) for a in arrays] else: return arrays", - "docstring": "Ensure that all arrays use the same namespace and device as reference. If necessary the arrays are moved to the same namespace and device as the reference array. Parameters ---------- reference : array Reference array. *arrays : array Arrays to check. Returns ------- arrays : list Arrays with the same namespace and device as reference.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\utils\\_array_api.py", - "ast_data": "FunctionDef name:ensure_common_namespace_device arguments arg:reference vararg:arrays Assign Call call:get_namespace If Assign Call call:device Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "size", - "source_code": "def size(self, name = None): with ops.name_scope(name, '%s_Size' % self.name, [self.resource_handle]): with ops.colocate_with(self.resource_handle): return gen_lookup_ops.lookup_table_size_v2(self.resource_handle)", - "docstring": "Compute the number of elements in this table. Args: name: A name for the operation (optional). 
Returns: A scalar tensor containing the number of elements in this table.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", - "ast_data": "FunctionDef name:size arguments arg:self arg:name With With Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_resource", - "source_code": "def get_resource(self, feature_column, resource_name): if feature_column not in self._cols_to_resources_map or resource_name not in self._cols_to_resources_map[feature_column]: raise ValueError('Resource does not exist.') return self._cols_to_resources_map[feature_column][resource_name]", - "docstring": "Returns an already created resource. Resources can be things such as tables, variables, trackables, etc. Args: feature_column: A object this variable corresponds to. resource_name: Name of the resource.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\feature_column\\feature_column_v2.py", - "ast_data": "FunctionDef name:get_resource arguments arg:self arg:feature_column arg:resource_name If BoolOp Compare op:NotIn Compare op:NotIn Raise raises:ValueError('Resource does not exist.') Return return:yes" - }, - { - "library": "tensorflow", - "name": "mean_squared_error", - "source_code": "@dispatch.add_dispatch_support def mean_squared_error(y_true, y_pred): y_pred = tensor_conversion.convert_to_tensor_v2_with_dispatch(y_pred) y_true = math_ops.cast(y_true, y_pred.dtype) return backend.mean(math_ops.squared_difference(y_pred, y_true), axis = -1)", - "docstring": "Computes the mean squared error between labels and predictions. After computing the squared distance between the inputs, the mean value over the last dimension is returned. Standalone usage: >>> y_true = np.random.randint(0, 2, size=(2, 3)) >>> y_pred = np.random.random(size=(2, 3)) >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred) >>> assert loss.shape == (2,) >>> assert np.array_equal( ... loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1)) Args: y_true: Ground truth values. shape = . y_pred: The predicted values. shape = . Returns: Mean squared error values. shape = .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", - "ast_data": "FunctionDef name:mean_squared_error arguments arg:y_true arg:y_pred Assign Call call:convert_to_tensor_v2_with_dispatch Assign Call call:cast Return return:yes" - }, - { - "library": "pytorch", - "name": "register_hook", - "source_code": "@abc.abstractmethod def register_hook(self, fn: Callable[..., Any]) -> RemovableHandle: raise NotImplementedError", - "docstring": "Register a backward hook. The hook will be called every time a gradient with respect to the Node is computed. The hook should have the following signature:: hook(grad_inputs: Tuple[Tensor], grad_outputs: Tuple[Tensor]) -> Tuple[Tensor] or None The hook should not modify its argument, but it can optionally return a new gradient which will be used in place of :attr:. This function returns a handle with a method `backward-hooks-executiongrad_outputsgrad_inputsgrad_outputs`. 
Example:: >>> import torch >>> a = torch.tensor([0., 0., 0.], requires_grad=True) >>> b = a.clone() >>> assert isinstance(b.grad_fn, torch.autograd.graph.Node) >>> handle = b.grad_fn.register_hook(lambda gI, gO: (gO[0] * 2,)) >>> b.sum().backward(retain_graph=True) >>> print(a.grad) tensor([2., 2., 2.]) >>> handle.remove() # Removes the hook >>> a.grad = None >>> b.sum().backward(retain_graph=True) >>> print(a.grad) tensor([1., 1., 1.])", - "type": "method", - "file_path": "pytorch\\torch\\autograd\\graph.py", - "ast_data": "FunctionDef name:register_hook arguments arg:self arg:fn type:Callable[..., Any] Raise raises:NotImplementedError" - }, - { - "library": "django", - "name": "destroy_test_db", - "source_code": "def destroy_test_db(self, old_database_name = None, verbosity = 1, keepdb = False, suffix = None): self.connection.close() if suffix is None: test_database_name = self.connection.settings_dict['NAME'] else: test_database_name = self.get_test_db_clone_settings(suffix)['NAME'] if verbosity > = 1: action = 'Destroying' if keepdb: action = 'Preserving' self.log('%s test database for alias %s...' % (action, self._get_database_display_str(verbosity, test_database_name))) if not keepdb: self._destroy_test_db(test_database_name, verbosity) if old_database_name is not None: settings.DATABASES[self.connection.alias]['NAME'] = old_database_name self.connection.settings_dict['NAME'] = old_database_name", - "docstring": "Destroy a test database, prompting the user for confirmation if the database already exists.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\creation.py", - "ast_data": "FunctionDef name:destroy_test_db arguments arg:self arg:old_database_name arg:verbosity arg:keepdb arg:suffix If Compare op:Is Assign Assign If Compare op:GtE Assign If Assign If If Compare op:IsNot Assign Assign" - }, - { - "library": "pytorch", - "name": "replace_all_uses", - "source_code": "def replace_all_uses(self, old: str, new: str): assert isinstance(old, str) assert isinstance(new, str) arg_types = (TensorArgument, SymIntArgument, SymFloatArgument, SymBoolArgument, CustomObjArgument, TokenArgument) for o in self.output_specs: if isinstance(o.arg, arg_types): if o.arg.name = = old: o.arg.name = new for i in self.input_specs: if isinstance(i.arg, arg_types): if i.arg.name = = old: i.arg.name = new", - "docstring": "Replace all uses of the old name with new name in the signature.", - "type": "method", - "file_path": "pytorch\\torch\\export\\graph_signature.py", - "ast_data": "FunctionDef name:replace_all_uses arguments arg:self arg:old type:str arg:new type:str Assign For If Call call:isinstance If Compare op:Eq Assign For If Call call:isinstance If Compare op:Eq Assign" - }, - { - "library": "scikit-learn", - "name": "Backend", - "source_code": "class Backend(Enum): ARRAY_API_STRICT = ('array_api_strict', _compat.is_array_api_strict_namespace) NUMPY = ('numpy', _compat.is_numpy_namespace) NUMPY_READONLY = ('numpy_readonly', _compat.is_numpy_namespace) CUPY = ('cupy', _compat.is_cupy_namespace) TORCH = ('torch', _compat.is_torch_namespace) DASK = ('dask.array', _compat.is_dask_namespace) SPARSE = ('sparse', _compat.is_pydata_sparse_namespace) JAX = ('jax.numpy', _compat.is_jax_namespace) def __new__(cls, value: str, _is_namespace: Callable[[ModuleType], bool]): obj = object.__new__(cls) obj._value_ = value return obj def __init__(self, value: str, is_namespace: Callable[[ModuleType], bool]): self.is_namespace = is_namespace def __str__(self) -> str: return cast(str, 
self.value)", - "docstring": "All array library backends explicitly tested by array-api-extra. Parameters ---------- value : str Name of the backend's module. is_namespace : Callable[[ModuleType], bool] Function to check whether an input module is the array namespace corresponding to the backend.", - "type": "class", - "file_path": "scikit-learn\\sklearn\\externals\\array_api_extra\\_lib\\_backends.py", - "ast_data": "ClassDef name:Backend Assign Assign Assign Assign Assign Assign Assign Assign FunctionDef name:__new__ arguments arg:cls arg:value type:str arg:_is_namespace type:Callable[[ModuleType], bool] Assign Call call:__new__ Assign Return return:yes FunctionDef name:__init__ arguments arg:self arg:value type:str arg:is_namespace type:Callable[[ModuleType], bool] Assign FunctionDef name:__str__ arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "compute_output_signature", - "source_code": "@doc_controls.for_subclass_implementers def compute_output_signature(self, input_signature): def check_type_return_shape(s): if not isinstance(s, tensor_lib.TensorSpec): raise TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s)) return s.shape input_shape = nest.map_structure(check_type_return_shape, input_signature) output_shape = self.compute_output_shape(input_shape) dtype = self._compute_dtype if dtype is None: input_dtypes = [s.dtype for s in nest.flatten(input_signature)] dtype = input_dtypes[0] return nest.map_structure(lambda s: tensor_lib.TensorSpec(dtype = dtype, shape = s), output_shape)", - "docstring": "Compute the output tensor signature of the layer based on the inputs. Unlike a TensorShape object, a TensorSpec object contains both shape and dtype information for a tensor. This method allows layers to provide output dtype information if it is different from the input dtype. For any layer that doesn't implement this function, the framework will fall back to use , and will assume that the output dtype matches the input dtype. Args: input_signature: Single TensorSpec or nested structure of TensorSpec objects, describing a candidate input for the layer. Returns: Single TensorSpec or nested structure of TensorSpec objects, describing how the layer would transform the provided input. 
Raises: TypeError: If input_signature contains a non-TensorSpec object.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:compute_output_signature arguments arg:self arg:input_signature FunctionDef name:check_type_return_shape arguments arg:s If Raise raises:TypeError('Only TensorSpec signature types are supported, but saw signature entry: {}.'.format(s)) Return return:yes Assign Call call:map_structure Assign Call call:compute_output_shape Assign If Compare op:Is Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "make_upright", - "source_code": "def make_upright(laf: Tensor, eps: float = 1e-09) -> Tensor: KORNIA_CHECK_LAF(laf) det = get_laf_scale(laf) scale = det b2a2 = torch.sqrt(laf[..., 0: 1, 1: 2] ** 2 + laf[..., 0: 1, 0: 1] ** 2) + eps laf1_ell = concatenate([(b2a2 / det).contiguous(), torch.zeros_like(det)], dim = 3) laf2_ell = concatenate([(laf[..., 1: 2, 1: 2] * laf[..., 0: 1, 1: 2] + laf[..., 1: 2, 0: 1] * laf[..., 0: 1, 0: 1]) / (b2a2 * det), (det / b2a2).contiguous()], dim = 3) laf_unit_scale = concatenate([concatenate([laf1_ell, laf2_ell], dim = 2), laf[..., :, 2: 3]], dim = 3) return scale_laf(laf_unit_scale, scale)", - "docstring": "Rectify the affine matrix, so that it becomes upright. Args: laf: :math: eps: for safe division. Returns: laf: :math: Example: >>> input = torch.ones(1, 5, 2, 3) # BxNx2x3 >>> output = make_upright(input) # BxNx2x3", - "type": "function", - "file_path": "kornia\\kornia\\feature\\laf.py", - "ast_data": "FunctionDef name:make_upright arguments arg:laf type:Tensor arg:eps type:float Assign Call call:get_laf_scale Assign Assign Assign Call call:concatenate Assign Call call:concatenate Assign Call call:concatenate Return return:yes" - }, - { - "library": "pytorch", - "name": "sum_tensors", - "source_code": "@staticmethod def sum_tensors(arg: Any) -> int: total_memory = 0 def sum_bytes(t: torch.Tensor) -> None: nonlocal total_memory total_memory + = t.untyped_storage().nbytes() tree_map_only(torch.Tensor, sum_bytes, arg) return total_memory", - "docstring": "Calculate total memory consumed by the tensors in the argument.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\_tools\\fake_collectives.py", - "ast_data": "FunctionDef name:sum_tensors arguments arg:arg type:Any Assign FunctionDef name:sum_bytes arguments arg:t type:torch.Tensor Return return:yes" - }, - { - "library": "scikit-learn", - "name": "predict_proba", - "source_code": "@available_if(_check_predict_proba) def predict_proba(self, X): check_is_fitted(self) results = [estimator.predict_proba(X) for estimator in self.estimators_] return results", - "docstring": "Return prediction probabilities for each class of each output. 
This method will raise a `classes_`) for that particular output.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\multioutput.py", - "ast_data": "FunctionDef name:predict_proba arguments arg:self arg:X Call call:available_if Assign Return return:yes" - }, - { - "library": "scrapy", - "name": "update_vars", - "source_code": "def update_vars(self, vars: dict[str, Any]) -> None: pass", - "docstring": "You can use this function to update the Scrapy objects that will be available in the shell", - "type": "method", - "file_path": "scrapy\\scrapy\\commands\\shell.py", - "ast_data": "FunctionDef name:update_vars arguments arg:self arg:vars type:dict[str, Any]" - }, - { - "library": "tensorflow", - "name": "extract_tensors_from_dataset", - "source_code": "def extract_tensors_from_dataset(dataset): iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return (inputs, targets, sample_weight)", - "docstring": "Extract a tuple of tensors from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors . and entry may be None.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", - "ast_data": "FunctionDef name:extract_tensors_from_dataset arguments arg:dataset Assign Call call:get_iterator Assign Call call:unpack_iterator_input Return return:yes" - }, - { - "library": "feincms", - "name": "translation_set_language", - "source_code": "def translation_set_language(request: HttpRequest, select_language: str) -> Optional[HttpResponseRedirect]: select_language = translation_allowed_language(select_language) if translation.check_for_language(select_language): fallback = False else: select_language = PRIMARY_LANGUAGE fallback = True translation.activate(select_language) request.LANGUAGE_CODE = translation.get_language() if hasattr(request, 'session'): current_session_language = request.session.get(LANGUAGE_SESSION_KEY, PRIMARY_LANGUAGE) if select_language ! 
= current_session_language: request.session[LANGUAGE_SESSION_KEY] = select_language elif request.method = = 'GET' and (not fallback): response = HttpResponseRedirect(request.get_full_path()) response.set_cookie(str(LANGUAGE_COOKIE_NAME), select_language, samesite = 'Lax') return response", - "docstring": "Set and activate a language, if that language is available.", - "type": "function", - "file_path": "feincms\\feincms\\extensions\\translations.py", - "ast_data": "FunctionDef name:translation_set_language arguments arg:request type:HttpRequest arg:select_language type:str Assign Call call:translation_allowed_language If Call call:check_for_language Assign Assign Assign Assign Call call:get_language If Call call:hasattr Assign Call call:get If Compare op:NotEq Assign If BoolOp Compare op:Eq Assign Call call:HttpResponseRedirect Return return:yes" - }, - { - "library": "scipy", - "name": "NestedFixedRule", - "source_code": "class NestedFixedRule(FixedRule): def __init__(self, higher, lower): self.higher = higher self.lower = lower self.xp = None @property def nodes_and_weights(self): if self.higher is not None: return self.higher.nodes_and_weights else: raise NotImplementedError @property def lower_nodes_and_weights(self): if self.lower is not None: return self.lower.nodes_and_weights else: raise NotImplementedError def estimate_error(self, f, a, b, args = ()): nodes, weights = self.nodes_and_weights lower_nodes, lower_weights = self.lower_nodes_and_weights if self.xp is None: self.xp = array_namespace(nodes) error_nodes = self.xp.concat([nodes, lower_nodes], axis = 0) error_weights = self.xp.concat([weights, -lower_weights], axis = 0) return self.xp.abs(_apply_fixed_rule(f, a, b, error_nodes, error_weights, args, self.xp))", - "docstring": "A cubature rule with error estimate given by the difference between two underlying fixed rules. If constructed as ``, this will use:: estimate(f, a, b) := higher.estimate(f, a, b) estimate_error(f, a, b) := \\|higher.estimate(f, a, b) - lower.estimate(f, a, b)| (where the absolute value is taken elementwise). Attributes ---------- higher : Rule Higher accuracy rule. lower : Rule Lower accuracy rule. See Also -------- GaussKronrodQuadrature Examples -------- >>> from scipy.integrate import cubature >>> from scipy.integrate._rules import ( ... GaussLegendreQuadrature, NestedFixedRule, ProductNestedFixed ... ) >>> higher = GaussLegendreQuadrature(10) >>> lower = GaussLegendreQuadrature(5) >>> rule = NestedFixedRule( ... higher, ... lower ... 
) >>> rule_2d = ProductNestedFixed([rule, rule])", - "type": "class", - "file_path": "scipy\\scipy\\integrate\\_rules\\_base.py", - "ast_data": "ClassDef name:NestedFixedRule FunctionDef name:__init__ arguments arg:self arg:higher arg:lower Assign Assign Assign FunctionDef name:nodes_and_weights arguments arg:self If Compare op:IsNot Return return:yes Raise raises:NotImplementedError FunctionDef name:lower_nodes_and_weights arguments arg:self If Compare op:IsNot Return return:yes Raise raises:NotImplementedError FunctionDef name:estimate_error arguments arg:self arg:f arg:a arg:b arg:args Assign Assign If Compare op:Is Assign Call call:array_namespace Assign Call call:concat Assign Call call:concat Return return:yes" - }, - { - "library": "numpy", - "name": "concatenate", - "source_code": "def concatenate(arrays, axis = 0): d = np.concatenate([getdata(a) for a in arrays], axis) rcls = get_masked_subclass(*arrays) data = d.view(rcls) for x in arrays: if getmask(x) is not nomask: break else: return data dm = np.concatenate([getmaskarray(a) for a in arrays], axis) dm = dm.reshape(d.shape) data._mask = _shrink_mask(dm) return data", - "docstring": "Concatenate a sequence of arrays along the given axis. Parameters ---------- arrays : sequence of array_like The arrays must have the same shape, except in the dimension corresponding to (the first, by default). axis : int, optional The axis along which the arrays will be joined. Default is 0. Returns ------- result : MaskedArray The concatenated array with any masked entries preserved. See Also -------- numpy.concatenate : Equivalent function in the top-level NumPy module. Examples -------- >>> import numpy as np >>> import numpy.ma as ma >>> a = ma.arange(3) >>> a[1] = ma.masked >>> b = ma.arange(2, 5) >>> a masked_array(data=[0, --, 2], mask=[False, True, False], fill_value=999999) >>> b masked_array(data=[2, 3, 4], mask=False, fill_value=999999) >>> ma.concatenate([a, b]) masked_array(data=[0, --, 2, 2, 3, 4], mask=[False, True, False, False, False, False], fill_value=999999)", - "type": "function", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:concatenate arguments arg:arrays arg:axis Assign Call call:concatenate Assign Call call:get_masked_subclass Assign Call call:view For If Compare op:IsNot Return return:yes Assign Call call:concatenate Assign Call call:reshape Assign Call call:_shrink_mask Return return:yes" - }, - { - "library": "tensorflow", - "name": "enable_save_as_bf16", - "source_code": "@tf_export('experimental.dtensor.enable_save_as_bf16', v1 = []) def enable_save_as_bf16(variables: List[tf_variables.Variable]): for v in variables: if isinstance(v, d_variable.DVariable): v.save_as_bf16 = True", - "docstring": "Allows float32 DVariables to be checkpointed and restored as bfloat16. The method only affects the DVariable part inside the model and leaves non-DTensor Variables/Tensors untouched. Args: variables: A list of tf.Variable to be enabled with bfloat16 save/restore. 
Only has effect on DTensor Variables as they go through d_variables with DTensor Specific logis.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\save_restore.py", - "ast_data": "FunctionDef name:enable_save_as_bf16 arguments arg:variables type:List[tf_variables.Variable] Call call:tf_export For If Call call:isinstance Assign" - }, - { - "library": "pytorch", - "name": "apply_random_seed", - "source_code": "def apply_random_seed(datapipe: DataPipe, rng: torch.Generator) -> DataPipe: graph = traverse_dps(datapipe) all_pipes = get_all_graph_pipes(graph) cache = set() random_datapipes = [] for pipe in all_pipes: if id(pipe) in cache: continue if _is_random_datapipe(pipe): random_datapipes.append(pipe) cache.add(id(pipe)) for pipe in random_datapipes: random_seed = int(torch.empty((), dtype = torch.int64).random_(generator = rng).item()) pipe.set_seed(random_seed) return datapipe", - "docstring": "Traverse the graph of ``. Args: datapipe: DataPipe that needs to set randomness rng: Random number generator to generate random seeds", - "type": "function", - "file_path": "pytorch\\torch\\utils\\data\\graph_settings.py", - "ast_data": "FunctionDef name:apply_random_seed arguments arg:datapipe type:DataPipe arg:rng type:torch.Generator Assign Call call:traverse_dps Assign Call call:get_all_graph_pipes Assign Call call:set Assign For If Compare op:In If Call call:_is_random_datapipe For Assign Call call:int Return return:yes" - }, - { - "library": "matplotlib", - "name": "__call__", - "source_code": "def __call__(self, X, alpha = None, bytes = False): rgba, mask = self._get_rgba_and_mask(X, alpha = alpha, bytes = bytes) if not np.iterable(X): rgba = tuple(rgba) return rgba", - "docstring": "Parameters ---------- X : float or int or array-like The data value(s) to convert to RGBA. For floats, *X* should be in the interval `numpy.uint8`.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:X arg:alpha arg:bytes Assign Call call:_get_rgba_and_mask If Assign Call call:tuple Return return:yes" - }, - { - "library": "kornia", - "name": "rgba_to_rgb", - "source_code": "def rgba_to_rgb(image: Tensor) -> Tensor: if not isinstance(image, Tensor): raise TypeError(f'Input type is not a Tensor. Got {type(image)}') if len(image.shape) < 3 or image.shape[-3] ! = 4: raise ValueError(f'Input size must have a shape of (*, 4, H, W).Got {image.shape}') r, g, b, a = torch.chunk(image, image.shape[-3], dim = -3) a_one = torch.tensor(1.0) - a r_new: Tensor = a_one * r + a * r g_new: Tensor = a_one * g + a * g b_new: Tensor = a_one * b + a * b return torch.cat([r_new, g_new, b_new], dim = -3)", - "docstring": "Convert an image from RGBA to RGB. Args: image: RGBA Image to be converted to RGB of shape :math:. Returns: RGB version of the image with shape :math:. Example: >>> input = torch.rand(2, 4, 4, 5) >>> output = rgba_to_rgb(input) # 2x3x4x5", - "type": "function", - "file_path": "kornia\\kornia\\color\\rgb.py", - "ast_data": "FunctionDef name:rgba_to_rgb arguments arg:image type:Tensor If Raise raises:TypeError(f'Input type is not a Tensor. 
Got {type(image)}') If BoolOp Compare op:Lt Compare op:NotEq Raise raises:ValueError(f'Input size must have a shape of (*, 4, H, W).Got {image.shape}') Assign Call call:chunk Assign Return return:yes" - }, - { - "library": "django", - "name": "set_as_test_mirror", - "source_code": "def set_as_test_mirror(self, primary_settings_dict): self.connection.settings_dict['NAME'] = primary_settings_dict['NAME']", - "docstring": "Set this database up to be used in testing as a mirror of a primary database whose settings are given.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\creation.py", - "ast_data": "FunctionDef name:set_as_test_mirror arguments arg:self arg:primary_settings_dict Assign" - }, - { - "library": "tensorflow", - "name": "VariableWatcher", - "source_code": "class VariableWatcher(object): __slots__ = ['_variable_watcher'] def __init__(self): self._variable_watcher = None def __enter__(self): self._variable_watcher = pywrap_tfe.TFE_Py_VariableWatcherNew() return self def __exit__(self, typ, value, traceback): pywrap_tfe.TFE_Py_VariableWatcherRemove(self._variable_watcher) def watched_variables(self): return pywrap_tfe.TFE_Py_VariableWatcherWatchedVariables(self._variable_watcher)", - "docstring": "A scope that tracks all trainable variable accesses within it. This explicitly ignores variables that are not marked as trainable. Sample usage: var = tf.Variable(0.0) with VariableWatcher() as variable_watcher: var.assign_add(1.0) assert variable_watcher.watched_variables == [var]", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\eager\\record.py", - "ast_data": "ClassDef name:VariableWatcher Assign FunctionDef name:__init__ arguments arg:self Assign FunctionDef name:__enter__ arguments arg:self Assign Call call:TFE_Py_VariableWatcherNew Return return:yes FunctionDef name:__exit__ arguments arg:self arg:typ arg:value arg:traceback FunctionDef name:watched_variables arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "new", - "source_code": "def new(self, values = None): new_context = copy(self) new_context._reset_dicts(values) return new_context", - "docstring": "Return a new context with the same properties, but with only the values given in 'values' stored.", - "type": "method", - "file_path": "django\\django\\template\\context.py", - "ast_data": "FunctionDef name:new arguments arg:self arg:values Assign Call call:copy Return return:yes" - }, - { - "library": "django", - "name": "adapt_datefield_value", - "source_code": "def adapt_datefield_value(self, value): return value", - "docstring": "Transform a date value to an object compatible with what is expected by the backend driver for date columns. The default implementation transforms the date to text, but that is not necessary for Oracle.", - "type": "method", - "file_path": "django\\django\\db\\backends\\oracle\\operations.py", - "ast_data": "FunctionDef name:adapt_datefield_value arguments arg:self arg:value Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_radius", - "source_code": "def set_radius(self, radius): self.width = self.height = 2 * radius self.stale = True", - "docstring": "Set the radius of the circle. 
Parameters ---------- radius : float", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_radius arguments arg:self arg:radius Assign Assign" - }, - { - "library": "pandas", - "name": "write_file", - "source_code": "def write_file(self) -> None: with get_handle(self._fname, 'wb', compression = self._compression, is_text = False, storage_options = self.storage_options) as self.handles: if self.handles.compression['method'] is not None: self._output_file, self.handles.handle = (self.handles.handle, BytesIO()) self.handles.created_handles.append(self.handles.handle) try: self._write_header(data_label = self._data_label, time_stamp = self._time_stamp) self._write_map() self._write_variable_types() self._write_varnames() self._write_sortlist() self._write_formats() self._write_value_label_names() self._write_variable_labels() self._write_expansion_fields() self._write_characteristics() records = self._prepare_data() self._write_data(records) self._write_strls() self._write_value_labels() self._write_file_close_tag() self._write_map() self._close() except Exception as exc: self.handles.close() if isinstance(self._fname, (str, os.PathLike)) and os.path.isfile(self._fname): try: os.unlink(self._fname) except OSError: warnings.warn(f'This save was not successful but {self._fname} could not be deleted. This file is not valid.', ResourceWarning, stacklevel = find_stack_level()) raise exc", - "docstring": "Export DataFrame object to Stata dta format. This method writes the contents of a pandas DataFrame to a file compatible with Stata. It includes features for handling value labels, variable types, and metadata like timestamps and data labels. The output file can then be read and used in Stata or other compatible statistical tools. See Also -------- read_stata : Read Stata file into DataFrame. DataFrame.to_stata : Export DataFrame object to Stata dta format. io.stata.StataWriter : A class for writing Stata binary dta files. Examples -------- >>> df = pd.DataFrame( ... { ... \"fully_labelled\": [1, 2, 3, 3, 1], ... \"partially_labelled\": [1.0, 2.0, np.nan, 9.0, np.nan], ... \"Y\": [7, 7, 9, 8, 10], ... \"Z\": pd.Categorical([\"j\", \"k\", \"l\", \"k\", \"j\"]), ... } ... ) >>> path = \"/My_path/filename.dta\" >>> labels = { ... \"fully_labelled\": {1: \"one\", 2: \"two\", 3: \"three\"}, ... \"partially_labelled\": {1.0: \"one\", 2.0: \"two\"}, ... } >>> writer = pd.io.stata.StataWriter( ... path, df, value_labels=labels ... 
) # doctest: +SKIP >>> writer.write_file() # doctest: +SKIP >>> df = pd.read_stata(path) # doctest: +SKIP >>> df # doctest: +SKIP index fully_labelled partially_labeled Y Z 0 0 one one 7 j 1 1 two two 7 k 2 2 three NaN 9 l 3 3 three 9.0 8 k 4 4 one NaN 10 j", - "type": "method", - "file_path": "pandas\\pandas\\io\\stata.py", - "ast_data": "FunctionDef name:write_file arguments arg:self With If Compare op:IsNot Assign Try Assign Call call:_prepare_data ExceptHandler If BoolOp Call call:isinstance Call call:isfile Try ExceptHandler Raise raises:exc" - }, - { - "library": "tensorflow", - "name": "reset_from_key_counter", - "source_code": "def reset_from_key_counter(self, key, counter): counter = _convert_to_state_tensor(counter) key = _convert_to_state_tensor(key) counter.shape.assert_is_compatible_with([_get_state_size(self.algorithm) - 1]) key.shape.assert_is_compatible_with([]) key = array_ops.reshape(key, [1]) state = array_ops.concat([counter, key], 0) self._state_var.assign(state)", - "docstring": "Resets the generator by a new key-counter pair. See for the meaning of \"key\" and \"counter\". Args: key: the new key. counter: the new counter.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\stateful_random_ops.py", - "ast_data": "FunctionDef name:reset_from_key_counter arguments arg:self arg:key arg:counter Assign Call call:_convert_to_state_tensor Assign Call call:_convert_to_state_tensor Assign Call call:reshape Assign Call call:concat" - }, - { - "library": "pandas", - "name": "construct_array_type", - "source_code": "@classmethod def construct_array_type(cls) -> type_t[BooleanArray]: return BooleanArray", - "docstring": "Return the array type associated with this dtype. Returns ------- type", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\boolean.py", - "ast_data": "FunctionDef name:construct_array_type arguments arg:cls Return return:yes" - }, - { - "library": "pandas", - "name": "mask_zero_div_zero", - "source_code": "def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray: if not hasattr(y, 'dtype'): y = np.array(y) if not hasattr(x, 'dtype'): x = np.array(x) zmask = y = = 0 if zmask.any(): zneg_mask = zmask & np.signbit(y) zpos_mask = zmask & ~zneg_mask x_lt0 = x < 0 x_gt0 = x > 0 nan_mask = zmask & (x = = 0) neginf_mask = zpos_mask & x_lt0 | zneg_mask & x_gt0 posinf_mask = zpos_mask & x_gt0 | zneg_mask & x_lt0 if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): result = result.astype('float64', copy = False) result[nan_mask] = np.nan result[posinf_mask] = np.inf result[neginf_mask] = -np.inf return result", - "docstring": "Set results of 0 // 0 to np.nan, regardless of the dtypes of the numerator or the denominator. Parameters ---------- x : ndarray y : ndarray result : ndarray Returns ------- ndarray The filled result. 
Examples -------- >>> x = np.array([1, 0, -1], dtype=np.int64) >>> x array([ 1, 0, -1]) >>> y = 0 # int 0; numpy behavior is different with float >>> result = x // y >>> result # raw numpy result does not fill division by zero array([0, 0, 0]) >>> mask_zero_div_zero(x, y, result) array([ inf, nan, -inf])", - "type": "function", - "file_path": "pandas\\pandas\\core\\ops\\missing.py", - "ast_data": "FunctionDef name:mask_zero_div_zero arguments arg:x arg:y arg:result type:np.ndarray If Assign Call call:array If Assign Call call:array Assign Compare op:Eq If Call call:any Assign Assign Assign Compare op:Lt Assign Compare op:Gt Assign Assign Assign If BoolOp Call call:any Call call:any Call call:any Assign Call call:astype Assign Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "get_model_perms", - "source_code": "def get_model_perms(self, request): return {'add': self.has_add_permission(request), 'change': self.has_change_permission(request), 'delete': self.has_delete_permission(request), 'view': self.has_view_permission(request)}", - "docstring": "Return a dict of all perms for this model. This dict has the keys `` mapping to the True/False for each of those actions.", - "type": "method", - "file_path": "django\\django\\contrib\\admin\\options.py", - "ast_data": "FunctionDef name:get_model_perms arguments arg:self arg:request Return return:yes" - }, - { - "library": "scipy", - "name": "TransferFunctionContinuous", - "source_code": "class TransferFunctionContinuous(TransferFunction, lti): def to_discrete(self, dt, method = 'zoh', alpha = None): return TransferFunction(*cont2discrete((self.num, self.den), dt, method = method, alpha = alpha)[: -1], dt = dt)", - "docstring": "Continuous-time Linear Time Invariant system in transfer function form. Represents the system as the transfer function :math:, where :math: are elements of the numerator , :math: are elements of the denominator , and `TransferFunctionltiTransferFunctionltiStateSpaceTransferFunctionZerosPolesGainTransferFunctionABCDH(s) = \\frac{s^2 + 3s + 3}{s^2 + 2s + 1}`: >>> from scipy import signal >>> num = [1, 3, 3] >>> den = [1, 2, 1] >>> signal.TransferFunction(num, den) TransferFunctionContinuous( array([ 1., 3., 3.]), array([ 1., 2., 1.]), dt: None )", - "type": "class", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "ClassDef name:TransferFunctionContinuous FunctionDef name:to_discrete arguments arg:self arg:dt arg:method arg:alpha Return return:yes" - }, - { - "library": "cherrypy", - "name": "make_file", - "source_code": "def make_file(self): return tempfile.TemporaryFile()", - "docstring": "Return a file-like object into which the request body will be read. By default, this will return a TemporaryFile. Override as needed. See also :attr:.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", - "ast_data": "FunctionDef name:make_file arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "denormalize_pixel_coordinates3d", - "source_code": "def denormalize_pixel_coordinates3d(pixel_coordinates: Tensor, depth: int, height: int, width: int, eps: float = 1e-08) -> Tensor: if pixel_coordinates.shape[-1] ! = 3: raise ValueError(f'Input pixel_coordinates must be of shape (*, 3). 
Got {pixel_coordinates.shape}') dhw: Tensor = stack([tensor(depth), tensor(width), tensor(height)]).to(pixel_coordinates.device).to(pixel_coordinates.dtype) factor: Tensor = tensor(2.0) / (dhw - 1).clamp(eps) return tensor(1.0) / factor * (pixel_coordinates + 1)", - "docstring": "Denormalize pixel coordinates. The input is assumed to be -1 if on extreme left, 1 if on extreme right (x = w-1). Args: pixel_coordinates: the normalized grid coordinates. Shape can be :math:. depth: the maximum depth in the x-axis. height: the maximum height in the y-axis. width: the maximum width in the x-axis. eps: safe division by zero. Return: the denormalized pixel coordinates.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\conversions.py", - "ast_data": "FunctionDef name:denormalize_pixel_coordinates3d arguments arg:pixel_coordinates type:Tensor arg:depth type:int arg:height type:int arg:width type:int arg:eps type:float If Compare op:NotEq Raise raises:ValueError(f'Input pixel_coordinates must be of shape (*, 3). Got {pixel_coordinates.shape}') Return return:yes" - }, - { - "library": "pytorch", - "name": "all_reduce", - "source_code": "def all_reduce(tensor, op = ReduceOp.SUM, group = group.WORLD): return _AllReduce.apply(op, group, tensor)", - "docstring": "Reduces the tensor data across all machines in such a way that all get the final result. After the call the returned tensor is going to be bitwise identical in all processes. Arguments: tensor (Tensor): Input of the collective. op (optional): One of the values from `` enum. Specifies an operation used for element-wise reductions. group (ProcessGroup, optional): The process group to work on. Returns: Tensor: Output of the collective", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\nn\\functional.py", - "ast_data": "FunctionDef name:all_reduce arguments arg:tensor arg:op arg:group Return return:yes" - }, - { - "library": "django", - "name": "is_internal_request", - "source_code": "def is_internal_request(self, domain, referer): return bool(re.match('^https?: //%s/' % re.escape(domain), referer))", - "docstring": "Return True if the referring URL is the same domain as the current request.", - "type": "method", - "file_path": "django\\django\\middleware\\common.py", - "ast_data": "FunctionDef name:is_internal_request arguments arg:self arg:domain arg:referer Return return:yes" - }, - { - "library": "scipy", - "name": "get_arrays_tol", - "source_code": "def get_arrays_tol(*arrays): if len(arrays) = = 0: raise ValueError('At least one array must be provided.') size = max((array.size for array in arrays)) weight = max((np.max(np.abs(array[np.isfinite(array)]), initial = 1.0) for array in arrays)) return 10.0 * EPS * max(size, 1.0) * weight", - "docstring": "Get a relative tolerance for a set of arrays. Borrowed from COBYQA Parameters ---------- *arrays: tuple Set of to get the tolerance for. Returns ------- float Relative tolerance for the set of arrays. 
Raises ------ ValueError If no array is provided.", - "type": "function", - "file_path": "scipy\\scipy\\_lib\\pyprima\\pyprima\\src\\pyprima\\common\\linalg.py", - "ast_data": "FunctionDef name:get_arrays_tol arguments vararg:arrays If Compare op:Eq Raise raises:ValueError('At least one array must be provided.') Assign Call call:max Assign Call call:max Return return:yes" - }, - { - "library": "tensorflow", - "name": "tile", - "source_code": "def tile(tensor, tile_assignment, assign_tuple_sharding = False, use_sharding_op = False, unspecified_dims = None): return Sharding.tile(tile_assignment).apply_to_tensor(tensor, assign_tuple_sharding = assign_tuple_sharding, use_sharding_op = use_sharding_op, unspecified_dims = unspecified_dims or [])", - "docstring": "Returns a tensor that has tiled sharding. Args: tensor: A tf.Tensor to shard. tile_assignment: An np.ndarray describing the topology of the tiling and which device will compute which part of the topology. assign_tuple_sharding: If the sharding type should be a tuple. use_sharding_op: If true, adds a sharding op to set the sharding. unspecified_dims: An optional list of dimensions unspecified.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\compiler\\xla\\experimental\\xla_sharding.py", - "ast_data": "FunctionDef name:tile arguments arg:tensor arg:tile_assignment arg:assign_tuple_sharding arg:use_sharding_op arg:unspecified_dims Return return:yes" - }, - { - "library": "pytorch", - "name": "ReturnValueHandler", - "source_code": "class ReturnValueHandler: def __init__(self, lazy_out_list): self.index: list[list[int]] = [] self.total_count = len(lazy_out_list) tensor_id_to_idx: dict[int, int] = {} for dup_idx, lazy_tensor in enumerate(lazy_out_list): uniq_idx = tensor_id_to_idx.get(id(lazy_tensor), None) if uniq_idx is not None: self.index[uniq_idx].append(dup_idx) else: uniq_idx = len(self.index) self.index.append([dup_idx]) tensor_id_to_idx[id(lazy_tensor)] = uniq_idx def duplicate_eager_tensors(self, eager_tensor_list): duplicated_list = [None] * self.total_count assert len(eager_tensor_list) = = len(self.index) for uniq_idx, eager_tensor in enumerate(eager_tensor_list): for dup_idx in self.index[uniq_idx]: duplicated_list[dup_idx] = eager_tensor return duplicated_list", - "docstring": "When ltc_sync_multi is called on multi tensors, the compiled graph will contain output only for unique tensors - if a tensor appears multiple times in the input to _ltc_sync_multi, only the first occurance matters. However from python level, we still expect multi tensors returned with duplciation even if the TS graph dedup the output. e.g. for method: def forward(self, a): return a, a the TS graph captured by LTC will return a single tensor, but Python method expects 2. 
This class dedup the lazy tensors first to get the index that will be used to duplicate the eager tensors later.", - "type": "class", - "file_path": "pytorch\\torch\\_lazy\\extract_compiled_graph.py", - "ast_data": "ClassDef name:ReturnValueHandler FunctionDef name:__init__ arguments arg:self arg:lazy_out_list Assign Call call:len For Call call:enumerate Assign Call call:get If Compare op:IsNot Assign Call call:len Assign FunctionDef name:duplicate_eager_tensors arguments arg:self arg:eager_tensor_list Assign For Call call:enumerate For Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "copy", - "source_code": "def copy(self, node): if isinstance(node, list): return [self.copy(n) for n in node] elif isinstance(node, tuple): return tuple((self.copy(n) for n in node)) elif not isinstance(node, (gast.AST, ast.AST)): return node assert isinstance(node, (gast.AST, ast.AST)) new_fields = {} for f in node._fields: if not f.startswith('__') and hasattr(node, f): new_fields[f] = self.copy(getattr(node, f)) new_node = type(node)(**new_fields) if self.preserve_annos: for k in self.preserve_annos: anno.copyanno(node, new_node, k) return new_node", - "docstring": "Returns a deep copy of node (excluding some fields, see copy_clean).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\pyct\\ast_util.py", - "ast_data": "FunctionDef name:copy arguments arg:self arg:node If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Return return:yes Assign For If BoolOp Call call:hasattr Assign Call call:copy Assign Call If For Return return:yes" - }, - { - "library": "pygame", - "name": "get_sprites_at", - "source_code": "def get_sprites_at(self, pos): _sprites = self._spritelist rect = Rect(pos, (1, 1)) colliding_idx = rect.collidelistall(_sprites) return [_sprites[i] for i in colliding_idx]", - "docstring": "return a list with all sprites at that position LayeredUpdates.get_sprites_at(pos): return colliding_sprites Bottom sprites are listed first; the top ones are listed last.", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:get_sprites_at arguments arg:self arg:pos Assign Assign Call call:Rect Assign Call call:collidelistall Return return:yes" - }, - { - "library": "numpy", - "name": "opt_func_info", - "source_code": "def opt_func_info(func_name = None, signature = None): import re from numpy._core._multiarray_umath import __cpu_targets_info__ as targets from numpy._core._multiarray_umath import dtype if func_name is not None: func_pattern = re.compile(func_name) matching_funcs = {k: v for k, v in targets.items() if func_pattern.search(k)} else: matching_funcs = targets if signature is not None: sig_pattern = re.compile(signature) matching_sigs = {} for k, v in matching_funcs.items(): matching_chars = {} for chars, targets in v.items(): if any((sig_pattern.search(c) or sig_pattern.search(dtype(c).name) for c in chars)): matching_chars[chars] = targets if matching_chars: matching_sigs[k] = matching_chars else: matching_sigs = matching_funcs return matching_sigs", - "docstring": "Returns a dictionary containing the currently supported CPU dispatched features for all optimized functions. Parameters ---------- func_name : str (optional) Regular expression to filter by function name. signature : str (optional) Regular expression to filter by data type. 
Returns ------- dict A dictionary where keys are optimized function names and values are nested dictionaries indicating supported targets based on data types. Examples -------- Retrieve dispatch information for functions named 'add' or 'sub' and data types 'float64' or 'float32': >>> import numpy as np >>> dict = np.lib.introspect.opt_func_info( ... func_name=\"add|abs\", signature=\"float64|complex64\" ... ) >>> import json >>> print(json.dumps(dict, indent=2)) { \"absolute\": { \"dd\": { \"current\": \"SSE41\", \"available\": \"SSE41 baseline(SSE SSE2 SSE3)\" }, \"Ff\": { \"current\": \"FMA3__AVX2\", \"available\": \"AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)\" }, \"Dd\": { \"current\": \"FMA3__AVX2\", \"available\": \"AVX512F FMA3__AVX2 baseline(SSE SSE2 SSE3)\" } }, \"add\": { \"ddd\": { \"current\": \"FMA3__AVX2\", \"available\": \"FMA3__AVX2 baseline(SSE SSE2 SSE3)\" }, \"FFF\": { \"current\": \"FMA3__AVX2\", \"available\": \"FMA3__AVX2 baseline(SSE SSE2 SSE3)\" } } }", - "type": "function", - "file_path": "numpy\\numpy\\lib\\introspect.py", - "ast_data": "FunctionDef name:opt_func_info arguments arg:func_name arg:signature If Compare op:IsNot Assign Call call:compile Assign Assign If Compare op:IsNot Assign Call call:compile Assign For Call call:items Assign For Call call:items If Call call:any Assign If Assign Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "l1_min_c", - "source_code": "@validate_params({'X': ['array-like', 'sparse matrix'], 'y': ['array-like'], 'loss': [StrOptions({'squared_hinge', 'log'})], 'fit_intercept': ['boolean'], 'intercept_scaling': [Interval(Real, 0, None, closed = 'neither')]}, prefer_skip_nested_validation = True) def l1_min_c(X, y, *, loss = 'squared_hinge', fit_intercept = True, intercept_scaling = 1.0): X = check_array(X, accept_sparse = 'csc') check_consistent_length(X, y) Y = LabelBinarizer(neg_label = -1).fit_transform(y).T den = np.max(np.abs(safe_sparse_dot(Y, X))) if fit_intercept: bias = np.full((np.size(y), 1), intercept_scaling, dtype = np.array(intercept_scaling).dtype) den = max(den, abs(np.dot(Y, bias)).max()) if den = = 0.0: raise ValueError('Ill-posed l1_min_c calculation: l1 will always select zero coefficients for this data') if loss = = 'squared_hinge': return 0.5 / den else: return 2.0 / den", - "docstring": "Return the lowest bound for . The lower bound for is computed such that for in the model is guaranteed not to be empty. This applies to l1 penalized classifiers, such as :class: with penalty='l1' and :class: with penalty='l1'. This value is valid if parameter in is not set. For an example of how to use this function, see :ref:. Parameters ---------- X : {array-like, sparse matrix} of shape (n_samples, n_features) Training vector, where is the number of samples and is the number of features. y : array-like of shape (n_samples,) Target vector relative to X. loss : {'squared_hinge', 'log'}, default='squared_hinge' Specifies the loss function. With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss). With 'log' it is the loss of logistic regression models. fit_intercept : bool, default=True Specifies if the intercept should be fitted by the model. It must match the fit() method parameter. intercept_scaling : float, default=1.0 When fit_intercept is True, instance vector x becomes [x, intercept_scaling], i.e. a \"synthetic\" feature with constant value equals to intercept_scaling is appended to the instance vector. It must match the fit() method parameter. 
Returns ------- l1_min_c : float Minimum value for C. Examples -------- >>> from sklearn.svm import l1_min_c >>> from sklearn.datasets import make_classification >>> X, y = make_classification(n_samples=100, n_features=20, random_state=42) >>> print(f\"{l1_min_c(X, y, loss='squared_hinge', fit_intercept=True):.4f}\") 0.0044", - "type": "function", - "file_path": "scikit-learn\\sklearn\\svm\\_bounds.py", - "ast_data": "FunctionDef name:l1_min_c arguments arg:X arg:y Call call:validate_params Assign Call call:check_array Assign Assign Call call:max If Assign Call call:full Assign Call call:max If Compare op:Eq Raise raises:ValueError('Ill-posed l1_min_c calculation: l1 will always select zero coefficients for this data') If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "algorithms", - "name": "panagram", - "source_code": "def panagram(string): letters = set(ascii_lowercase) for c in string: try: letters.remove(c.lower()) except: pass return len(letters) = = 0", - "docstring": "Returns whether the input string is an English panagram or not. Parameters: string (str): A sentence in the form of a string. Returns: A boolean with the result.", - "type": "function", - "file_path": "algorithms\\algorithms\\strings\\panagram.py", - "ast_data": "FunctionDef name:panagram arguments arg:string Assign Call call:set For Try ExceptHandler Return return:yes" - }, - { - "library": "pytorch", - "name": "permute_tensor", - "source_code": "def permute_tensor(self: torch.Tensor, src_dst: list[int], group: RANK_TYPES, tag: str = '') -> torch.Tensor: t, rankset, group_size = _expand_group(group, tag) local_pg = c10d._find_or_create_pg_by_ranks_and_tag(t, rankset, group_size) output_split_sizes = [0] * group_size input_split_sizes = [0] * group_size for src, dst in enumerate(src_dst): if src = = dist.get_rank(local_pg): input_split_sizes[dst] = self.numel() if dst = = dist.get_rank(local_pg): output_split_sizes[src] = self.numel() return all_to_all_single(self, output_split_sizes, input_split_sizes, group, tag)", - "docstring": "Permutes the elements of the tensor according to the given source/destination pairs. should be defined such that src_dst[m] == n means m sends to n. Group can be one of: List[int]: ranks participating in the collective. List[List[int]]: 2D mesh of ranks taking part of this collective in MPMD. ProcessGroup: Will perform a collective using the ranks and tag of the PG. DeviceMesh: Do a SPMD collective over all ranks of the mesh (DeviceMesh, int): Do a MPMD collective over one", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_functional_collectives.py", - "ast_data": "FunctionDef name:permute_tensor arguments arg:self type:torch.Tensor arg:src_dst type:list[int] arg:group type:RANK_TYPES arg:tag type:str Assign Call call:_expand_group Assign Call call:_find_or_create_pg_by_ranks_and_tag Assign Assign For Call call:enumerate If Compare op:Eq Assign Call call:numel If Compare op:Eq Assign Call call:numel Return return:yes" - }, - { - "library": "scikit-learn", - "name": "__sklearn_is_fitted__", - "source_code": "def __sklearn_is_fitted__(self): last_step = None for _, estimator in reversed(self.steps): if estimator ! = 'passthrough': last_step = estimator break if last_step is None: return True try: check_is_fitted(last_step) return True except NotFittedError: return False", - "docstring": "Indicate whether pipeline has been fit. This is done by checking whether the last non- step of the pipeline is fitted. 
An empty pipeline is considered fitted.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\pipeline.py", - "ast_data": "FunctionDef name:__sklearn_is_fitted__ arguments arg:self Assign For Call call:reversed If Compare op:NotEq Assign If Compare op:Is Return return:yes Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "django", - "name": "to_python", - "source_code": "def to_python(self, value): if value in self.empty_values: return None if isinstance(value, datetime.datetime): return from_current_timezone(value) if isinstance(value, datetime.date): result = datetime.datetime(value.year, value.month, value.day) return from_current_timezone(result) try: result = parse_datetime(value.strip()) except ValueError: raise ValidationError(self.error_messages['invalid'], code = 'invalid') if not result: result = super().to_python(value) return from_current_timezone(result)", - "docstring": "Validate that the input can be converted to a datetime. Return a Python datetime.datetime object.", - "type": "method", - "file_path": "django\\django\\forms\\fields.py", - "ast_data": "FunctionDef name:to_python arguments arg:self arg:value If Compare op:In Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Assign Call call:datetime Return return:yes Try Assign Call call:parse_datetime ExceptHandler Raise raises:ValidationError(self.error_messages['invalid'], code='invalid') If Assign Call call:to_python Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, experimental_debug_info_func): super(TFLiteConverterBaseV1, self).__init__() self.inference_type = _dtypes.float32 self.inference_input_type = None self.inference_output_type = None self.output_format = constants.TFLITE self.quantized_input_stats = {} self.default_ranges_stats = None self.drop_control_dependency = True self.reorder_across_fake_quant = False self.change_concat_input_ranges = False self.dump_graphviz_dir = None self.dump_graphviz_video = False self.conversion_summary_dir = None self._debug_info_func = experimental_debug_info_func self._metadata.environment.apiVersion = 1", - "docstring": "Constructor for TFLiteConverter. Args: experimental_debug_info_func: An experimental function to retrieve the graph debug info for a set of nodes from the .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:experimental_debug_info_func Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "scipy", - "name": "Levy13", - "source_code": "class Levy13(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N)) self.custom_bounds = [(-5, 5), (-5, 5)] self.global_optimum = [[1 for _ in range(self.N)]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 u = sin(3 * pi * x[0]) ** 2 v = (x[0] - 1) ** 2 * (1 + sin(3 * pi * x[1]) ** 2) w = (x[1] - 1) ** 2 * (1 + sin(2 * pi * x[1]) ** 2) return u + v + w", - "docstring": "Levy13 objective function. This class defines the Levy13 [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. 
math:: f_{\\text{Levy13}}(x) = \\left(x_{1} -1\\right)^{2} \\left[\\sin^{2} \\left(3 \\pi x_{2}\\right) + 1\\right] + \\left(x_{2} - 1\\right)^{2} \\left[\\sin^{2}\\left(2 \\pi x_{2}\\right) + 1\\right] + \\sin^{2}\\left(3 \\pi x_{1}\\right) with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Mishra, S. Some new test functions for global optimization and performance of repulsive particle swarm method. Munich Personal RePEc Archive, 2006, 2718", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_L.py", - "ast_data": "ClassDef name:Levy13 FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, dataset): self.dataset = dataset elem_spec = self.dataset.element_spec _check_table_initializer_element_spec(elem_spec) key_type = elem_spec[0].dtype value_type = elem_spec[1].dtype super(DatasetInitializer, self).__init__(key_type, value_type)", - "docstring": "Creates a table initializer from a . Args: dataset: A object that produces tuples of scalars. The first scalar is treated as a key and the second as value. Raises: ValueError if doesn't conform to specifications. Returns: A object", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\lookup_ops.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:dataset Assign Assign Assign Assign" - }, - { - "library": "mongo", - "name": "encrypt_expression", - "source_code": "async def encrypt_expression(self, expression: Mapping[str, Any], algorithm: str, key_id: Optional[Union[Binary, uuid.UUID]] = None, key_alt_name: Optional[str] = None, query_type: Optional[str] = None, contention_factor: Optional[int] = None, range_opts: Optional[RangeOpts] = None) -> RawBSONDocument: return cast(RawBSONDocument, await self._encrypt_helper(value = expression, algorithm = algorithm, key_id = key_id, key_alt_name = key_alt_name, query_type = query_type, contention_factor = contention_factor, range_opts = range_opts, is_expression = True))", - "docstring": "Encrypt a BSON expression with a given key and algorithm. Note that exactly one of ` (string): The encryption algorithm to use. See :class: for some valid options. :param key_id: Identifies a data key by `~bson.binary.Binary~bson.binary.UUID_SUBTYPE (str): The query type to execute. See :class: for valid options. :param contention_factorAlgorithm.INDEXEDAlgorithm.INDEXEDrangeRangeOpts~bson.RawBSONDocumentrange_optsuuid.UUID`. .. versionadded:: 4.4", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\encryption.py", - "ast_data": "AsyncFunctionDef name:encrypt_expression arguments arg:self arg:expression type:Mapping[str, Any] arg:algorithm type:str arg:key_id type:Optional[Union[Binary, uuid.UUID]] arg:key_alt_name type:Optional[str] arg:query_type type:Optional[str] arg:contention_factor type:Optional[int] arg:range_opts type:Optional[RangeOpts] Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_current_fig_manager", - "source_code": "def get_current_fig_manager() -> FigureManagerBase | None: return gcf().canvas.manager", - "docstring": "Return the figure manager of the current figure. The figure manager is a container for the actual backend-depended window that displays the figure on screen. 
If no current figure exists, a new one is created, and its figure manager is returned. Returns ------- or backend-dependent subclass thereof", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", - "ast_data": "FunctionDef name:get_current_fig_manager arguments Return return:yes" - }, - { - "library": "kornia", - "name": "inverse_transform", - "source_code": "def inverse_transform(self, x: Tensor) -> Tensor: if not self.fitted: raise RuntimeError('Needs to be fitted first before running. Please call fit or set include_fit to True.') if not self.compute_inv: raise RuntimeError('Did not compute inverse ZCA. Please set compute_inv to True') if self.transform_inv is None: raise TypeError('The transform inverse should be a Tensor. Gotcha None.') mean_inv: Tensor = -self.mean_vector.mm(self.transform_matrix) y = linear_transform(x, self.transform_inv, mean_inv) return y", - "docstring": "Apply the inverse transform to the whitened data. Args: x: Whitened data. Returns: Original data.", - "type": "method", - "file_path": "kornia\\kornia\\enhance\\zca.py", - "ast_data": "FunctionDef name:inverse_transform arguments arg:self arg:x type:Tensor If Raise raises:RuntimeError('Needs to be fitted first before running. Please call fit or set include_fit to True.') If Raise raises:RuntimeError('Did not compute inverse ZCA. Please set compute_inv to True') If Compare op:Is Raise raises:TypeError('The transform inverse should be a Tensor. Gotcha None.') Assign Call call:linear_transform Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, name: str, input_nodes: list[Buffer], layout: Layout, input_reorder: Optional[list[int]] = None) -> None: super().__init__(name) self.input_nodes = input_nodes self.output_node: Buffer = Buffer(name = 'buf_out', layout = layout) self.input_reorder = input_reorder self.layout = layout", - "docstring": "Baseclass for CUDA C++ Templates, derived from KernelTemplate. Not to be instantiated directly. Args: name (str): The name of the CUDATemplate object. input_nodes (List[IRNode]): A list of input IRNodes. layout (Layout): The layout of the output buffer / tensor. input_reorder (Optional[List[int]]): An optional list that specifies the order of the input nodes.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cuda\\cuda_template.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:name type:str arg:input_nodes type:list[Buffer] arg:layout type:Layout arg:input_reorder type:Optional[list[int]] Assign Assign Assign" - }, - { - "library": "scikit-learn", - "name": "transform", - "source_code": "def transform(self, X): X = validate_data(self, X, accept_sparse = 'csr', reset = False, ensure_non_negative = True) sparse = sp.issparse(X) if self.sample_interval is None: if self.sample_steps = = 1: sample_interval = 0.8 elif self.sample_steps = = 2: sample_interval = 0.5 elif self.sample_steps = = 3: sample_interval = 0.4 else: raise ValueError('If sample_steps is not in [1, 2, 3], you need to provide sample_interval') else: sample_interval = self.sample_interval transf = self._transform_sparse if sparse else self._transform_dense return transf(X, self.sample_steps, sample_interval)", - "docstring": "Apply approximate feature map to X. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. 
Returns ------- X_new : {ndarray, sparse matrix}, shape = (n_samples, n_features * (2*sample_steps - 1)) Whether the return value is an array or sparse matrix depends on the type of the input X.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\kernel_approximation.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:X Assign Call call:validate_data Assign Call call:issparse If Compare op:Is If Compare op:Eq Assign If Compare op:Eq Assign If Compare op:Eq Assign Raise raises:ValueError('If sample_steps is not in [1, 2, 3], you need to provide sample_interval') Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "step", - "source_code": "@_use_grad_for_differentiable def step(self, closure = None): self._cuda_graph_capture_health_check() loss = None if closure is not None: with torch.enable_grad(): loss = closure() for group in self.param_groups: params_with_grad: list[Tensor] = [] grads: list[Tensor] = [] exp_avgs: list[Tensor] = [] exp_avg_sqs: list[Tensor] = [] state_steps: list[Tensor] = [] beta1, beta2 = cast(tuple[float, float], group['betas']) has_complex = self._init_group(group, params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps) radam(params_with_grad, grads, exp_avgs, exp_avg_sqs, state_steps, beta1 = beta1, beta2 = beta2, lr = group['lr'], weight_decay = group['weight_decay'], eps = group['eps'], maximize = group['maximize'], foreach = group['foreach'], capturable = group['capturable'], differentiable = group['differentiable'], decoupled_weight_decay = group['decoupled_weight_decay'], has_complex = has_complex) return loss", - "docstring": "Perform a single optimization step. Args: closure (Callable, optional): A closure that reevaluates the model and returns the loss.", - "type": "method", - "file_path": "pytorch\\torch\\optim\\radam.py", - "ast_data": "FunctionDef name:step arguments arg:self arg:closure Assign If Compare op:IsNot With Assign Call call:closure For Assign Call call:cast Assign Call call:_init_group Return return:yes" - }, - { - "library": "tensorflow", - "name": "batch_shape_tensor", - "source_code": "def batch_shape_tensor(self, name = 'batch_shape_tensor'): with self._name_scope(name): return self._batch_shape_tensor()", - "docstring": "Shape of batch dimensions of this operator, determined at runtime. If this operator acts like the batch matrix with , then this returns a holding . Args: name: A name for this . 
Returns:", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\linalg\\linear_operator.py", - "ast_data": "FunctionDef name:batch_shape_tensor arguments arg:self arg:name With Return return:yes" - }, - { - "library": "django", - "name": "StaticFilesHandler", - "source_code": "class StaticFilesHandler(StaticFilesHandlerMixin, WSGIHandler): def __init__(self, application): self.application = application self.base_url = urlparse(self.get_base_url()) super().__init__() def __call__(self, environ, start_response): if not self._should_handle(get_path_info(environ)): return self.application(environ, start_response) return super().__call__(environ, start_response)", - "docstring": "WSGI middleware that intercepts calls to the static files directory, as defined by the STATIC_URL setting, and serves those files.", - "type": "class", - "file_path": "django\\django\\contrib\\staticfiles\\handlers.py", - "ast_data": "ClassDef name:StaticFilesHandler FunctionDef name:__init__ arguments arg:self arg:application Assign Assign Call call:urlparse FunctionDef name:__call__ arguments arg:self arg:environ arg:start_response If Return return:yes Return return:yes" - }, - { - "library": "cherrypy", - "name": "process_headers", - "source_code": "def process_headers(self): headers = self.headers for name, value in self.header_list: name = name.title() value = value.strip() headers[name] = httputil.decode_TEXT_maybe(value) if name = = 'Cookie': try: self.cookie.load(value) except CookieError as exc: raise cherrypy.HTTPError(400, str(exc)) if not dict.__contains__(headers, 'Host'): if self.protocol > = (1, 1): msg = \"HTTP/1.1 requires a 'Host' request header.\" raise cherrypy.HTTPError(400, msg) else: headers['Host'] = httputil.SanitizedHost(dict.get(headers, 'Host')) host = dict.get(headers, 'Host') if not host: host = self.local.name or self.local.ip self.base = '%s: //%s' % (self.scheme, host)", - "docstring": "Parse HTTP header data into Python structures. 
(Core)", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cprequest.py", - "ast_data": "FunctionDef name:process_headers arguments arg:self Assign For Assign Call call:title Assign Call call:strip Assign Call call:decode_TEXT_maybe If Compare op:Eq Try ExceptHandler Raise raises:cherrypy.HTTPError(400, str(exc)) If If Compare op:GtE Assign Raise raises:cherrypy.HTTPError(400, msg) Assign Call call:SanitizedHost Assign Call call:get If Assign BoolOp Assign" - }, - { - "library": "pytorch", - "name": "tanhshrink", - "source_code": "def tanhshrink(input): if has_torch_function_unary(input): return handle_torch_function(tanhshrink, (input,), input) return input - input.tanh()", - "docstring": "tanhshrink(input) -> Tensor Applies element-wise, :math: See :class: for more details.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:tanhshrink arguments arg:input If Call call:has_torch_function_unary Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "generate_dequeue_op", - "source_code": "def generate_dequeue_op(self, tpu_device = 0): self.freeze() if self._generated_dequeue_op and (not ops.inside_function()): raise ValueError(\"Can't generate two dequeue Ops from the same queue\") self._generated_dequeue_op = True full_name = '%s/dequeue' % self._name sharded_shapes = [policy.get_sharded_shape(shape) for shape, policy in zip(self._tuple_shapes, self._sharding_policies)] with ops.device(tpu_name_util.core(tpu_device)): values = tpu_ops.infeed_dequeue_tuple(dtypes = self._tuple_types, shapes = sharded_shapes, name = full_name) return tag_sharding_attribute_for_dequeued_tensors(values, self._input_partition_dims)", - "docstring": "Generate TPU dequeue ops. Args: tpu_device: The TPU device ordinal where the infeed instruction should be placed. Returns: A list of Outputs corresponding to a partition of infeed dequeued into XLA, suitable for use within a replicated block. Raises: ValueError: if the types or shapes of the tuple elements have not been set; or if a dequeue op has already been generated.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_feed.py", - "ast_data": "FunctionDef name:generate_dequeue_op arguments arg:self arg:tpu_device If BoolOp Raise raises:ValueError(\"Can't generate two dequeue Ops from the same queue\") Assign Assign Assign With Assign Call call:infeed_dequeue_tuple Return return:yes" - }, - { - "library": "flexx", - "name": "set_log_level", - "source_code": "def set_log_level(level, match = None): if isinstance(level, str): level = level.lower() if level not in logging_types: raise ValueError('Invalid argument \"%s\"' % level) level = logging_types[level] elif not isinstance(level, int): raise TypeError('log level must be an int or string') logger.setLevel(level) _filter.match = match _formatter.prepend_caller = level < = logging.DEBUG", - "docstring": "Set the logging level and match filter Parameters: level (str, int): The verbosity of messages to print. If a str, it can be either DEBUG, INFO, WARNING, ERROR, or CRITICAL. Note that these are for convenience and are equivalent to passing in logging.DEBUG, etc. match (str, regexp, None): String to match. 
Only those messages that contain `` option is used, a small overhead is added to each logged message.", - "type": "function", - "file_path": "flexx\\flexx\\util\\logging.py", - "ast_data": "FunctionDef name:set_log_level arguments arg:level arg:match If Call call:isinstance Assign Call call:lower If Compare op:NotIn Raise raises:ValueError('Invalid argument \"%s\"' % level) Assign If Raise raises:TypeError('log level must be an int or string') Assign Assign Compare op:LtE" - }, - { - "library": "tensorflow", - "name": "range_input_producer", - "source_code": "@tf_export(v1 = ['train.range_input_producer']) @deprecation.deprecated(None, 'Queue-based input pipelines have been replaced by `tf.data`. Use `tf.data.Dataset.range(limit).shuffle(limit).repeat(num_epochs)`. If `shuffle = False`, omit the `.shuffle(...)`.') def range_input_producer(limit, num_epochs = None, shuffle = True, seed = None, capacity = 32, shared_name = None, name = None): with ops.name_scope(name, 'input_producer', [limit]) as name: range_tensor = math_ops.range(limit) return input_producer(range_tensor, [], num_epochs, shuffle, seed, capacity, shared_name, 'fraction_of_%d_full' % capacity, name)", - "docstring": "Produces the integers from 0 to limit-1 in a queue. Note: if is not , this function creates local counter . Use to initialize local variables. Args: limit: An int32 scalar tensor. num_epochs: An integer (optional). If specified, produces each integer times before generating an OutOfRange error. If not specified, can cycle through the integers an unlimited number of times. shuffle: Boolean. If true, the integers are randomly shuffled within each epoch. seed: An integer (optional). Seed used if shuffle == True. capacity: An integer. Sets the queue capacity. shared_name: (optional). If set, this queue will be shared under the given name across multiple sessions. name: A name for the operations (optional). Returns: A Queue with the output integers. A for the Queue is added to the current 's collection. @compatibility(eager) Input pipelines based on Queues are not supported when eager execution is enabled. Please use the API to ingest data under eager execution. 
@end_compatibility", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\training\\input.py", - "ast_data": "FunctionDef name:range_input_producer arguments arg:limit arg:num_epochs arg:shuffle arg:seed arg:capacity arg:shared_name arg:name Call call:tf_export Call call:deprecated With Assign Call call:range Return return:yes" - }, - { - "library": "django", - "name": "get_relations", - "source_code": "def get_relations(self, cursor, table_name): cursor.execute('\\n SELECT column_name, referenced_column_name, referenced_table_name\\n FROM information_schema.key_column_usage\\n WHERE table_name = %s\\n AND table_schema = DATABASE()\\n AND referenced_table_schema = DATABASE()\\n AND referenced_table_name IS NOT NULL\\n AND referenced_column_name IS NOT NULL\\n ', [table_name]) return {field_name: (other_field, other_table) for field_name, other_field, other_table in cursor.fetchall()}", - "docstring": "Return a dictionary of {field_name: (field_name_other_table, other_table)} representing all foreign keys in the given table.", - "type": "method", - "file_path": "django\\django\\db\\backends\\mysql\\introspection.py", - "ast_data": "FunctionDef name:get_relations arguments arg:self arg:cursor arg:table_name Return return:yes" - }, - { - "library": "pytorch", - "name": "amax", - "source_code": "@_apply_docstring_templates def amax(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, *, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor: if dtype is None: dtype = input.dtype mask_input = _combine_input_and_mask(amax, input, mask) dim_ = _canonical_dim(dim, mask_input.ndim) if mask_input.layout = = torch.strided: return torch.amax(mask_input, dim_, bool(keepdim)).to(dtype = dtype) elif mask_input.layout = = torch.sparse_coo: if mask is None: raise ValueError('masked amax expects explicit mask for sparse_coo tensor input') return _sparse_coo_scatter_reduction_helper(torch.amax, mask_input, dim_, bool(keepdim), dtype) elif mask_input.layout = = torch.sparse_csr: if mask is None: raise ValueError('masked amax expects explicit mask for sparse_csr tensor input') return _sparse_csr_segment_reduction_helper(torch.amax, mask_input, dim_, bool(keepdim), dtype) else: raise ValueError(f'masked amax expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)')", - "docstring": "{reduction_signature} {reduction_descr} {reduction_identity_dtype} {reduction_args} {reduction_example}", - "type": "function", - "file_path": "pytorch\\torch\\masked\\_ops.py", - "ast_data": "FunctionDef name:amax arguments arg:input type:Union[Tensor, MaskedTensor] arg:dim type:DimOrDims If Compare op:Is Assign Assign Call call:_combine_input_and_mask Assign Call call:_canonical_dim If Compare op:Eq Return return:yes If Compare op:Eq If Compare op:Is Raise raises:ValueError('masked amax expects explicit mask for sparse_coo tensor input') Return return:yes If Compare op:Eq If Compare op:Is Raise raises:ValueError('masked amax expects explicit mask for sparse_csr tensor input') Return return:yes Raise raises:ValueError(f'masked amax expects strided, sparse_coo or sparse_csr tensor (got {mask_input.layout} tensor)')" - }, - { - "library": "pytorch", - "name": "ExplainOutput", - "source_code": "@dataclasses.dataclass class ExplainOutput: graphs: list[torch.fx.GraphModule] graph_count: int graph_break_count: int break_reasons: list[Any] op_count: int ops_per_graph: Optional[list[torch.fx.Node]] = None out_guards: 
Optional[list[_guards.Guard]] = None compile_times: Optional[str] = None def __str__(self) -> str: output = f'Graph Count: {self.graph_count}\\n' output + = f'Graph Break Count: {self.graph_break_count}\\n' output + = f'Op Count: {self.op_count}\\n' output + = 'Break Reasons: \\n' for idx, break_reason in enumerate(self.break_reasons): output + = f' Break Reason {idx + 1}: \\n' output + = f' Reason: {break_reason.reason}\\n' output + = ' User Stack: \\n' for frame_summary in break_reason.user_stack: output + = f' {frame_summary}\\n' if self.ops_per_graph is not None: output + = 'Ops per Graph: \\n' for idx, ops in enumerate(self.ops_per_graph): output + = f' Ops {idx + 1}: \\n' for op in ops: output + = f' {op}\\n' if self.out_guards is not None: output + = 'Out Guards: \\n' for i, guard in enumerate(self.out_guards): output + = f' Guard {i + 1}: \\n' output + = f' {str(guard)}' if self.compile_times is not None: output + = f'Compile Times: {self.compile_times}\\n' return output", - "docstring": "This is the output of :func: There is no reason to create this class directly.", - "type": "class", - "file_path": "pytorch\\torch\\_dynamo\\backends\\debugging.py", - "ast_data": "ClassDef name:ExplainOutput FunctionDef name:__str__ arguments arg:self Assign For Call call:enumerate For If Compare op:IsNot For Call call:enumerate For If Compare op:IsNot For Call call:enumerate If Compare op:IsNot Return return:yes" - }, - { - "library": "django", - "name": "force_bytes", - "source_code": "def force_bytes(s, encoding = 'utf-8', strings_only = False, errors = 'strict'): if isinstance(s, bytes): if encoding = = 'utf-8': return s else: return s.decode('utf-8', errors).encode(encoding, errors) if strings_only and is_protected_type(s): return s if isinstance(s, memoryview): return bytes(s) return str(s).encode(encoding, errors)", - "docstring": "Similar to smart_bytes, except that lazy instances are resolved to strings, rather than kept as lazy objects. If strings_only is True, don't convert (some) non-string-like objects.", - "type": "function", - "file_path": "django\\django\\utils\\encoding.py", - "ast_data": "FunctionDef name:force_bytes arguments arg:s arg:encoding arg:strings_only arg:errors If Call call:isinstance If Compare op:Eq Return return:yes Return return:yes If BoolOp Call call:is_protected_type Return return:yes If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "cast_to_floatx", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def cast_to_floatx(x): if isinstance(x, (tensor_lib.Tensor, variables_module.Variable, sparse_tensor.SparseTensor)): return math_ops.cast(x, dtype = floatx()) return numpy_compat.np_asarray(x, dtype = floatx())", - "docstring": "Cast a Numpy array to the default Keras float type. Args: x: Numpy array or TensorFlow tensor. Returns: The same array (Numpy array if was a Numpy array, or TensorFlow tensor if was a tensor), cast to its new type. 
Example: >>> tf.keras.backend.floatx() 'float32' >>> arr = np.array([1.0, 2.0], dtype='float64') >>> arr.dtype dtype('float64') >>> new_arr = cast_to_floatx(arr) >>> new_arr array([1., 2.], dtype=float32) >>> new_arr.dtype dtype('float32')", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:cast_to_floatx arguments arg:x If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "run_frozen_optimizations", - "source_code": "def run_frozen_optimizations(mod, optimize_numerics: bool = True, preserved_methods: Optional[list[str]] = None): if mod._c._has_method('forward'): torch._C._jit_pass_optimize_frozen_graph(mod.graph, optimize_numerics) if preserved_methods is None: preserved_methods = [] for method in preserved_methods: torch._C._jit_pass_optimize_frozen_graph(mod.__getattr__(method).graph, optimize_numerics)", - "docstring": "Run a series of optimizations looking for patterns that occur in frozen graphs. The current set of optimizations includes: - Dropout Removal - Pretranspose Linear Layers - Concat Linear Layers with same input Tensor - Conv -> Batchnorm folding - Conv -> Add/Sub folding - Conv -> Mul/Div folding Args: mod (:class:): a frozen module to be optimized optimize_numerics (bool): If `torch.testing.assert_closeassert_close` tolerance. Conv -> Batchnorm folding, Conv-Add/Sub, and Conv -> Mul/Div folding all may alter numerics. Returns: None Note: In rare occassions, this can result in slower execution. Example (Freezing a module with Conv->Batchnorm) .. code-block:: python import torch in_channels, out_channels = 3, 32 conv = torch.nn.Conv2d( in_channels, out_channels, kernel_size=3, stride=2, bias=True ) bn = torch.nn.BatchNorm2d(out_channels, eps=0.001) mod = torch.nn.Sequential(conv, bn) # set optimize to False here, by default freezing runs run_frozen_optimizations frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize=False) # inspect frozen mod assert \"batch_norm\" in str(frozen_mod.graph) torch.jit.run_frozen_optimizations(frozen_mod) assert \"batch_norm\" not in str(frozen_mod.graph)", - "type": "function", - "file_path": "pytorch\\torch\\jit\\_freeze.py", - "ast_data": "FunctionDef name:run_frozen_optimizations arguments arg:mod arg:optimize_numerics type:bool arg:preserved_methods type:Optional[list[str]] If Call call:_has_method If Compare op:Is Assign For" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "def decision_function(self, X): return super().decision_function(X)", - "docstring": "Apply decision function to an array of samples. The decision function is equal (up to a constant factor) to the log-posterior of the model, i.e. . In a binary classification setting this instead corresponds to the difference . See :ref:. Parameters ---------- X : array-like of shape (n_samples, n_features) Array of samples (test vectors). Returns ------- C : ndarray of shape (n_samples,) or (n_samples, n_classes) Decision function values related to each class, per sample. 
In the two-class case, the shape is , giving the log likelihood ratio of the positive class.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\discriminant_analysis.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Return return:yes" - }, - { - "library": "matplotlib", - "name": "set", - "source_code": "def set(self, *, h_pad = None, w_pad = None, hspace = None, wspace = None, rect = None): for td in self.set.__kwdefaults__: if locals()[td] is not None: self._params[td] = locals()[td]", - "docstring": "Set the pads for constrained_layout. Parameters ---------- h_pad, w_pad : float Padding around the Axes elements in inches. Default to :rc: and :rc:. hspace, wspace : float Fraction of the figure to dedicate to space between the axes. These are evenly spread between the gaps between the Axes. A value of 0.2 for a three-column layout would have a space of 0.1 of the figure width between each column. If h/wspace < h/w_pad, then the pads are used instead. Default to :rc: and :rc:. rect : tuple of 4 floats Rectangle in figure coordinates to perform constrained layout in (left, bottom, width, height), each from 0-1.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\layout_engine.py", - "ast_data": "FunctionDef name:set arguments arg:self For If Compare op:IsNot Assign" - }, - { - "library": "pandas", - "name": "from_blocks", - "source_code": "@classmethod def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self: return cls(blocks, axes, verify_integrity = False)", - "docstring": "Constructor for BlockManager and SingleBlockManager with same signature.", - "type": "method", - "file_path": "pandas\\pandas\\core\\internals\\managers.py", - "ast_data": "FunctionDef name:from_blocks arguments arg:cls arg:blocks type:list[Block] arg:axes type:list[Index] Return return:yes" - }, - { - "library": "tensorflow", - "name": "copy", - "source_code": "def copy(self, **override_parameters_kwargs): parameters = dict(self.parameters, **override_parameters_kwargs) return type(self)(**parameters)", - "docstring": "Creates a deep copy of the distribution. Note: the copy distribution may continue to depend on the original initialization arguments. Args: **override_parameters_kwargs: String/value dictionary of initialization arguments to override with new values. Returns: distribution: A new instance of initialized from the union of self.parameters and override_parameters_kwargs, i.e., .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\distributions\\distribution.py", - "ast_data": "FunctionDef name:copy arguments arg:self kwarg:override_parameters_kwargs Assign Call call:dict Return return:yes" - }, - { - "library": "mongo", - "name": "common_wire_version", - "source_code": "@property def common_wire_version(self) -> Optional[int]: servers = self.known_servers if servers: return min((s.max_wire_version for s in self.known_servers)) return None", - "docstring": "Minimum of all servers' max wire versions, or None.", - "type": "method", - "file_path": "mongo\\pymongo\\topology_description.py", - "ast_data": "FunctionDef name:common_wire_version arguments arg:self Assign If Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "score", - "source_code": "def score(self, X, y = None): return np.mean(self.score_samples(X))", - "docstring": "Compute the average log-likelihood of the samples. Parameters ---------- X : ndarray of shape (n_samples, n_features) The data. y : Ignored Ignored parameter. 
Returns ------- ll : float Average log-likelihood of the samples under the current model.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\decomposition\\_factor_analysis.py", - "ast_data": "FunctionDef name:score arguments arg:self arg:X arg:y Return return:yes" - }, - { - "library": "tensorflow", - "name": "embedding_tables", - "source_code": "@property def embedding_tables(self) -> Dict[tpu_embedding_v2_utils.TableConfig, tf_variables.Variable]: self._maybe_build() return {table: self._variables[table.name]['parameters'] for table in self._table_config}", - "docstring": "Returns a dict of embedding tables, keyed by .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_for_serving.py", - "ast_data": "FunctionDef name:embedding_tables arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "check_estimators_unfitted", - "source_code": "@ignore_warnings def check_estimators_unfitted(name, estimator_orig): err_msg = 'Estimator should raise a NotFittedError when calling `{method}` before fit. Either call `check_is_fitted(self)` at the beginning of `{method}` or set `tags.requires_fit = False` on estimator tags to disable this check.\\n- `check_is_fitted`: https: //scikit-learn.org/dev/modules/generated/sklearn.utils.validation.check_is_fitted.html\\n- Estimator Tags: https: //scikit-learn.org/dev/developers/develop.html#estimator-tags' X, y = _regression_dataset() estimator = clone(estimator_orig) for method in ('decision_function', 'predict', 'predict_proba', 'predict_log_proba'): if hasattr(estimator, method): with raises(NotFittedError, err_msg = err_msg.format(method = method)): getattr(estimator, method)(X)", - "docstring": "Check that predict raises an exception in an unfitted estimator. Unfitted estimators should raise a NotFittedError.", - "type": "function", - "file_path": "scikit-learn\\sklearn\\utils\\estimator_checks.py", - "ast_data": "FunctionDef name:check_estimators_unfitted arguments arg:name arg:estimator_orig Assign Assign Call call:_regression_dataset Assign Call call:clone For If Call call:hasattr With" - }, - { - "library": "kornia", - "name": "RgbToBgr", - "source_code": "class RgbToBgr(Module): ONNX_DEFAULT_INPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1] ONNX_DEFAULT_OUTPUTSHAPE: ClassVar[list[int]] = [-1, 3, -1, -1] def forward(self, image: Tensor) -> Tensor: return rgb_to_bgr(image)", - "docstring": "Convert an image from RGB to BGR. The image data is assumed to be in the range of (0, 1). Returns: BGR version of the image. 
Shape: - image: :math: - output: :math: Example: >>> input = torch.rand(2, 3, 4, 5) >>> bgr = RgbToBgr() >>> output = bgr(input) # 2x3x4x5", - "type": "class", - "file_path": "kornia\\kornia\\color\\rgb.py", - "ast_data": "ClassDef name:RgbToBgr FunctionDef name:forward arguments arg:self arg:image type:Tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "register_binary_elementwise_api", - "source_code": "def register_binary_elementwise_api(func): _BINARY_ELEMENTWISE_APIS.append(func) for args, handler in _ELEMENTWISE_API_HANDLERS.items(): if len(args) = = 2: _add_dispatch_for_binary_elementwise_api(func, args[0], args[1], handler) return func", - "docstring": "Decorator that registers a TensorFlow op as a binary elementwise API.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", - "ast_data": "FunctionDef name:register_binary_elementwise_api arguments arg:func For Call call:items If Compare op:Eq Return return:yes" - }, - { - "library": "django", - "name": "add_immediate_loading", - "source_code": "def add_immediate_loading(self, field_names): existing, defer = self.deferred_loading field_names = set(field_names) if 'pk' in field_names: field_names.remove('pk') field_names.add(self.get_meta().pk.name) if defer: self.deferred_loading = (field_names.difference(existing), False) else: self.deferred_loading = (frozenset(field_names), False)", - "docstring": "Add the given list of model field names to the set of fields to retrieve when the SQL is executed (\"immediate loading\" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, remove those names from the new field_names before storing the new names for immediate loading. 
(That is, immediate loading overrides any existing immediate values, but respects existing deferrals.)", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\query.py", - "ast_data": "FunctionDef name:add_immediate_loading arguments arg:self arg:field_names Assign Assign Call call:set If Compare op:In If Assign Assign" - }, - { - "library": "scipy", - "name": "write", - "source_code": "def write(self, arr): mat_tag_pos = self.file_stream.tell() if scipy.sparse.issparse(arr): self.write_sparse(arr) self.update_matrix_tag(mat_tag_pos) return narr = to_writeable(arr) if narr is None: raise TypeError(f'Could not convert {arr} (type {type(arr)}) to array') if isinstance(narr, MatlabObject): self.write_object(narr) elif isinstance(narr, MatlabFunction): raise MatWriteError('Cannot write matlab functions') elif narr is EmptyStructMarker: self.write_empty_struct() elif narr.dtype.fields: self.write_struct(narr) elif narr.dtype.hasobject: self.write_cells(narr) elif narr.dtype.kind in ('U', 'S'): if self.unicode_strings: codec = 'UTF8' else: codec = 'ascii' self.write_char(narr, codec) else: self.write_numeric(narr) self.update_matrix_tag(mat_tag_pos)", - "docstring": "Write to stream at top and sub levels Parameters ---------- arr : array_like array-like object to create writer for", - "type": "method", - "file_path": "scipy\\scipy\\io\\matlab\\_mio5.py", - "ast_data": "FunctionDef name:write arguments arg:self arg:arr Assign Call call:tell If Call call:issparse Return return:no Assign Call call:to_writeable If Compare op:Is Raise raises:TypeError(f'Could not convert {arr} (type {type(arr)}) to array') If Call call:isinstance If Call call:isinstance Raise raises:MatWriteError('Cannot write matlab functions') If Compare op:Is If If If Compare op:In If Assign Assign" - }, - { - "library": "django", - "name": "deep_deconstruct", - "source_code": "def deep_deconstruct(self, obj): if isinstance(obj, list): return [self.deep_deconstruct(value) for value in obj] elif isinstance(obj, tuple): return tuple((self.deep_deconstruct(value) for value in obj)) elif isinstance(obj, dict): return {key: self.deep_deconstruct(value) for key, value in obj.items()} elif isinstance(obj, functools.partial): return (obj.func, self.deep_deconstruct(obj.args), self.deep_deconstruct(obj.keywords)) elif isinstance(obj, COMPILED_REGEX_TYPE): return RegexObject(obj) elif isinstance(obj, type): return obj elif hasattr(obj, 'deconstruct'): deconstructed = obj.deconstruct() if isinstance(obj, models.Field): deconstructed = deconstructed[1:] path, args, kwargs = deconstructed return (path, [self.deep_deconstruct(value) for value in args], {key: self.deep_deconstruct(value) for key, value in kwargs.items()}) else: return obj", - "docstring": "Recursive deconstruction for a field and its arguments. 
Used for full comparison for rename/alter; sometimes a single-level deconstruction will not compare correctly.", - "type": "method", - "file_path": "django\\django\\db\\migrations\\autodetector.py", - "ast_data": "FunctionDef name:deep_deconstruct arguments arg:self arg:obj If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:hasattr Assign Call call:deconstruct If Call call:isinstance Assign Assign Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "tuple_shapes", - "source_code": "def tuple_shapes(self): if not self.is_tuple(): raise ValueError('tuple_shapes() called on a non-tuple shape') return self._tuple_shapes", - "docstring": "If this is a tuple, returns its sequence of constituent Shape objects. Returns: Tuple sub-shapes. Raises: ValueError: if this is not a tuple.", - "type": "method", - "file_path": "tensorflow\\third_party\\xla\\xla\\python_api\\xla_shape.py", - "ast_data": "FunctionDef name:tuple_shapes arguments arg:self If Raise raises:ValueError('tuple_shapes() called on a non-tuple shape') Return return:yes" - }, - { - "library": "kornia", - "name": "forward", - "source_code": "def forward(self, pred: Tensor, target: Tensor) -> Tensor: if not (pred.shape[2:] = = target.shape[2:] and pred.size(0) = = target.size(0) and (target.size(1) = = 1)): raise ValueError(f'Prediction and target need to be of same size, and target should not be one-hot.Got {pred.shape} and {target.shape}.') if pred.size(1) < target.max().item(): raise ValueError('Invalid target value.') out = stack([self.perform_erosion(pred[:, i: i + 1], where(target = = i, tensor(1, device = target.device, dtype = target.dtype), tensor(0, device = target.device, dtype = target.dtype))) for i in range(pred.size(1))]) if self.reduction = = 'mean': out = out.mean() elif self.reduction = = 'sum': out = out.sum() elif self.reduction = = 'none': pass else: raise NotImplementedError(f'reduction `{self.reduction}` has not been implemented yet.') return out", - "docstring": "Compute Hausdorff loss. Args: pred: predicted tensor with a shape of :math: or :math:. Each channel is as binary as: 1 -> fg, 0 -> bg. target: target tensor with a shape of :math: or :math:. Returns: Estimated Hausdorff Loss.", - "type": "method", - "file_path": "kornia\\kornia\\losses\\hausdorff.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:pred type:Tensor arg:target type:Tensor If Raise raises:ValueError(f'Prediction and target need to be of same size, and target should not be one-hot.Got {pred.shape} and {target.shape}.') If Compare op:Lt Raise raises:ValueError('Invalid target value.') Assign Call call:stack If Compare op:Eq Assign Call call:mean If Compare op:Eq Assign Call call:sum If Compare op:Eq Raise raises:NotImplementedError(f'reduction `{self.reduction}` has not been implemented yet.') Return return:yes" - }, - { - "library": "mongo", - "name": "cancel_check", - "source_code": "def cancel_check(self) -> None: context = self._cancel_context if context: context.cancel()", - "docstring": "Cancel any concurrent hello check. 
Note: this is called from a weakref.proxy callback and MUST NOT take any locks.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\monitor.py", - "ast_data": "FunctionDef name:cancel_check arguments arg:self Assign If" - }, - { - "library": "mongo", - "name": "close", - "source_code": "def close(self) -> None: if self._publish: assert self._listener is not None assert self._events is not None self._events.put((self._listener.publish_server_closed, (self._description.address, self._topology_id))) if _SDAM_LOGGER.isEnabledFor(logging.DEBUG): _debug_log(_SDAM_LOGGER, message = _SDAMStatusMessage.STOP_SERVER, topologyId = self._topology_id, serverHost = self._description.address[0], serverPort = self._description.address[1]) self._monitor.close() self._pool.close()", - "docstring": "Clear the connection pool and stop the monitor. Reconnect with open().", - "type": "method", - "file_path": "mongo\\pymongo\\synchronous\\server.py", - "ast_data": "FunctionDef name:close arguments arg:self If If Call call:isEnabledFor" - }, - { - "library": "tensorflow", - "name": "get", - "source_code": "@abstractmethod def get(self): raise NotImplementedError", - "docstring": "Creates a generator to extract data from the queue. Skip the data if it is . # Returns Generator yielding tuples or .", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", - "ast_data": "FunctionDef name:get arguments arg:self Raise raises:NotImplementedError" - }, - { - "library": "scipy", - "name": "trimmed_mean", - "source_code": "def trimmed_mean(a, limits = (0.1, 0.1), inclusive = (1, 1), relative = True, axis = None): if not isinstance(limits, tuple) and isinstance(limits, float): limits = (limits, limits) if relative: return trimr(a, limits = limits, inclusive = inclusive, axis = axis).mean(axis = axis) else: return trima(a, limits = limits, inclusive = inclusive).mean(axis = axis)", - "docstring": "Returns the trimmed mean of the data along the given axis. %s", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", - "ast_data": "FunctionDef name:trimmed_mean arguments arg:a arg:limits arg:inclusive arg:relative arg:axis If BoolOp Call call:isinstance Assign If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "request_stop", - "source_code": "def request_stop(self): raise StopIteration('step_fn has requested the iterations to stop.')", - "docstring": "Exit the training loop by causing to return . Causes to exit by raising an exception. Raises: StopIteration", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\monitored_session.py", - "ast_data": "FunctionDef name:request_stop arguments arg:self Raise raises:StopIteration('step_fn has requested the iterations to stop.')" - }, - { - "library": "pytorch", - "name": "relu", - "source_code": "def relu(input: Tensor, inplace: bool = False) -> Tensor: if has_torch_function_unary(input): return handle_torch_function(relu, (input,), input, inplace = inplace) if inplace: result = torch.relu_(input) else: result = torch.relu(input) return result", - "docstring": "relu(input, inplace=False) -> Tensor Applies the rectified linear unit function element-wise. 
See :class: for more details.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\functional.py", - "ast_data": "FunctionDef name:relu arguments arg:input type:Tensor arg:inplace type:bool If Call call:has_torch_function_unary Return return:yes If Assign Call call:relu_ Assign Call call:relu Return return:yes" - }, - { - "library": "scikit-learn", - "name": "in_y_true_range", - "source_code": "def in_y_true_range(self, y): return self.interval_y_true.includes(y) and np.all(y.astype(int) = = y)", - "docstring": "Return True if y is in the valid range of y_true. Parameters ---------- y : ndarray", - "type": "method", - "file_path": "scikit-learn\\sklearn\\_loss\\loss.py", - "ast_data": "FunctionDef name:in_y_true_range arguments arg:self arg:y Return return:yes" - }, - { - "library": "mongo", - "name": "all_hosts", - "source_code": "@property def all_hosts(self) -> set[tuple[str, int]]: return set(map(common.clean_node, itertools.chain(self._doc.get('hosts', []), self._doc.get('passives', []), self._doc.get('arbiters', []))))", - "docstring": "List of hosts, passives, and arbiters known to this server.", - "type": "method", - "file_path": "mongo\\pymongo\\hello.py", - "ast_data": "FunctionDef name:all_hosts arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "enforce_output_layout", - "source_code": "def enforce_output_layout(gm: torch.fx.GraphModule): *_, output_node = gm.graph.nodes out_list = output_node.args[0] with gm.graph.inserting_before(output_node): for n in out_list: if not isinstance(n.meta['val'], torch.Tensor) or not torch._prims_common.is_non_overlapping_and_dense(n.meta['val']): continue ft = n.meta['val'] new_node = gm.graph.call_function(prims.inductor_force_stride_order.default, (n, ft.stride())) output_node.replace_input_with(n, new_node) gm.graph.lint() gm.recompile()", - "docstring": "Make sure the output node's layout does not change due to compiler optimizations by adding aten.as_strided nodes with the expected strides. Only used for inference so we can assume all graph outputs are model outputs.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\freezing.py", - "ast_data": "FunctionDef name:enforce_output_layout arguments arg:gm type:torch.fx.GraphModule Assign Assign With For If BoolOp Assign Assign Call call:call_function" - }, - { - "library": "tensorflow", - "name": "log_first_n", - "source_code": "@tf_export(v1 = ['logging.log_first_n']) def log_first_n(level, msg, n, *args): count = _GetNextLogCountPerToken(_GetFileAndLine()) log_if(level, msg, count < n, *args)", - "docstring": "Log 'msg % args' at level 'level' only first 'n' times. Not threadsafe. Args: level: The level at which to log. msg: The message to be logged. n: The number of times this should be called before it is logged. 
*args: The args to be substituted into the msg.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\platform\\tf_logging.py", - "ast_data": "FunctionDef name:log_first_n arguments arg:level arg:msg arg:n vararg:args Call call:tf_export Assign Call call:_GetNextLogCountPerToken" - }, - { - "library": "tensorflow", - "name": "supports_serialize", - "source_code": "@property def supports_serialize(self): return self._supports_serialize", - "docstring": "Whether the Reader implementation can serialize its state.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\io_ops.py", - "ast_data": "FunctionDef name:supports_serialize arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, patches, *, match_original = False, **kwargs): if match_original: def determine_facecolor(patch): if patch.get_fill(): return patch.get_facecolor() return [0, 0, 0, 0] kwargs['facecolors'] = [determine_facecolor(p) for p in patches] kwargs['edgecolors'] = [p.get_edgecolor() for p in patches] kwargs['linewidths'] = [p.get_linewidth() for p in patches] kwargs['linestyles'] = [p.get_linestyle() for p in patches] kwargs['antialiaseds'] = [p.get_antialiased() for p in patches] super().__init__(**kwargs) self.set_paths(patches)", - "docstring": "Parameters ---------- patches : list of A sequence of Patch objects. This list may include a heterogeneous assortment of different patch types. match_original : bool, default: False If True, use the colors and linewidths of the original patches. If False, new colors may be assigned by providing the standard collection arguments, facecolor, edgecolor, linewidths, norm or cmap. **kwargs All other parameters are forwarded to . If any of *edgecolors*, *facecolors*, *linewidths*, *antialiaseds* are None, they default to their patch setting, in sequence form. Notes ----- The use of functionality is optional. If the matrix `~.ScalarMappable.set_array`), at draw time a call to scalar mappable will be made to set the face colors.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\collections.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:patches kwarg:kwargs If FunctionDef name:determine_facecolor arguments arg:patch If Call call:get_fill Return return:yes Return return:yes Assign Assign Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "singleprint_from_saved_model", - "source_code": "def singleprint_from_saved_model(export_dir: str) -> str: try: return singleprint_from_fingerprint_proto(export_dir) except ValueError: pass try: write_fingerprint(export_dir) return singleprint_from_fingerprint_proto(export_dir) except ValueError: pass try: return singleprint_from_saved_model_proto(export_dir) except ValueError as e: raise ValueError(e) from None", - "docstring": "Returns the singleprint of the SavedModel in . First tries to construct the singleprint from , then from . Attempts to write the if not found, but doesn't return an error if it isn't writeable. Args: export_dir: The directory that contains the SavedModel. Returns: A string containing the singleprint of the SavedModel in . 
Raises: ValueError: If a valid singleprint cannot be constructed from the SavedModel.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\fingerprinting_utils.py", - "ast_data": "FunctionDef name:singleprint_from_saved_model arguments arg:export_dir type:str Try Return return:yes ExceptHandler Try Return return:yes ExceptHandler Try Return return:yes ExceptHandler Raise raises:ValueError(e)" - }, - { - "library": "scrapy", - "name": "peek", - "source_code": "def peek(self) -> Request | None: request = super().peek() if not request: return None return request_from_dict(request, spider = self.spider)", - "docstring": "Returns the next object to be returned by :meth:, but without removing it from the queue. Raises :exc: if the underlying queue class does not implement a `` method, which is optional for queues.", - "type": "method", - "file_path": "scrapy\\scrapy\\squeues.py", - "ast_data": "FunctionDef name:peek arguments arg:self Assign Call call:peek If Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "get_user_input_list", - "source_code": "def get_user_input_list(self, split_node: torch.fx.Node, next_users: list[torch.fx.Node]) -> list[list[Union[torch.fx.Node, _Range]]]: user_inputs_list: list[list[Union[torch.fx.Node, _Range]]] = [] for user in next_users: if user.target in (torch.cat, torch.stack): user_inputs_list.append(self.get_merged_user_inputs(split_node, user)) else: user_inputs_list.append(self.get_non_cat_node_input(split_node, user)) return user_inputs_list", - "docstring": "Returns list of inputs to the following user nodes, in order. The outer list represents the user node. The inner list represents the inputs to that particular node. This list can either contain - a tuple representing the ranges of get_items that should go into the cat (closed interval) - torch.fx.Node representing \"other\" inputs (which are not coming from our split)", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\fx_passes\\split_cat.py", - "ast_data": "FunctionDef name:get_user_input_list arguments arg:self arg:split_node type:torch.fx.Node arg:next_users type:list[torch.fx.Node] For If Compare op:In Return return:yes" - }, - { - "library": "pytorch", - "name": "RemoveInputMutation", - "source_code": "class RemoveInputMutation(_pass.Transform): def _run(self, *args) -> torch.fx.GraphModule: for node in reversed(self.module.graph.nodes): if node.op = = 'call_function' and node.target = = torch.ops.aten.copy_.default and (len(node.users) = = 0) and isinstance(node.args[0], torch.fx.Node) and (node.args[0].op = = 'placeholder'): self.module.graph.erase_node(node) return self.module", - "docstring": "Remove nodes that mutate module inputs. This pass is recommended to be used after `aten.copy_.default` nodes to the graph when it detects mutations to inputs. These nodes are not needed for ONNX export for inference. 
They could be useful for training.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\functionalization.py", - "ast_data": "ClassDef name:RemoveInputMutation FunctionDef name:_run arguments arg:self vararg:args For Call call:reversed If BoolOp Compare op:Eq Compare op:Eq Compare op:Eq Call call:isinstance Compare op:Eq Return return:yes" - }, - { - "library": "tensorflow", - "name": "name", - "source_code": "@property def name(self): return self._name", - "docstring": "Name of the layer (string), set in the constructor.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", - "ast_data": "FunctionDef name:name arguments arg:self Return return:yes" - }, - { - "library": "authlib", - "name": "validate_userinfo_encryption_enc_values_supported", - "source_code": "def validate_userinfo_encryption_enc_values_supported(self): validate_array_value(self, 'userinfo_encryption_enc_values_supported')", - "docstring": "OPTIONAL. JSON array containing a list of the JWE encryption algorithms (enc values) [JWA] supported by the UserInfo Endpoint to encode the Claims in a JWT.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", - "ast_data": "FunctionDef name:validate_userinfo_encryption_enc_values_supported arguments arg:self" - }, - { - "library": "scipy", - "name": "hfft2", - "source_code": "@_dispatch def hfft2(x, s = None, axes = (-2, -1), norm = None, overwrite_x = False, workers = None, *, plan = None): return (Dispatchable(x, np.ndarray),)", - "docstring": "Compute the 2-D FFT of a Hermitian complex array. Parameters ---------- x : array Input array, taken to be Hermitian complex. s : sequence of ints, optional Shape of the real output. axes : sequence of ints, optional Axes over which to compute the FFT. norm : {\"backward\", \"ortho\", \"forward\"}, optional Normalization mode (see ). Default is \"backward\". overwrite_x : bool, optional If True, the contents of can be destroyed; the default is False. See for more details. workers : int, optional Maximum number of workers to use for parallel computation. If negative, the value wraps around from `~scipy.fft.ffthfftnhfftn`. Examples -------- >>> import scipy.fft >>> import numpy as np >>> x = np.array([[1+0j, 2+0j], [2+0j, 1+0j]]) # Hermitian-symmetric input >>> scipy.fft.hfft2(x, s=(2, 2)) array([[ 6., 0.], [ 0., -2.]])", - "type": "function", - "file_path": "scipy\\scipy\\fft\\_basic.py", - "ast_data": "FunctionDef name:hfft2 arguments arg:x arg:s arg:axes arg:norm arg:overwrite_x arg:workers Return return:yes" - }, - { - "library": "matplotlib", - "name": "HPacker", - "source_code": "class HPacker(PackerBase): def _get_bbox_and_child_offsets(self, renderer): dpicor = renderer.points_to_pixels(1.0) pad = self.pad * dpicor sep = self.sep * dpicor bboxes = [c.get_bbox(renderer) for c in self.get_visible_children()] if not bboxes: return (Bbox.from_bounds(0, 0, 0, 0).padded(pad), []) (y0, y1), yoffsets = _get_aligned_offsets([bbox.intervaly for bbox in bboxes], self.height, self.align) width, xoffsets = _get_packed_offsets([bbox.width for bbox in bboxes], self.width, sep, self.mode) x0 = bboxes[0].x0 xoffsets - = [bbox.x0 for bbox in bboxes] - x0 return (Bbox.from_bounds(x0, y0, width, y1 - y0).padded(pad), [*zip(xoffsets, yoffsets)])", - "docstring": "HPacker packs its children horizontally, automatically adjusting their relative positions at draw time. .. 
code-block:: none +-------------------------------+ | Child 1 Child 2 Child 3 | +-------------------------------+", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\offsetbox.py", - "ast_data": "ClassDef name:HPacker FunctionDef name:_get_bbox_and_child_offsets arguments arg:self arg:renderer Assign Call call:points_to_pixels Assign Assign Assign If Return return:yes Assign Call call:_get_aligned_offsets Assign Call call:_get_packed_offsets Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "std", - "source_code": "@_apply_docstring_templates def std(input: Union[Tensor, MaskedTensor], dim: DimOrDims = None, unbiased: Optional[bool] = None, *, correction: Optional[int] = None, keepdim: Optional[bool] = False, dtype: Optional[DType] = None, mask: Optional[Tensor] = None) -> Tensor: return _std_var(input = input, dim = dim, unbiased = unbiased, correction_opt = correction, keepdim = keepdim, dtype = dtype, mask = mask, take_sqrt = True)", - "docstring": "{reduction_signature} {reduction_descr} The identity value of sample standard deviation operation is undefined. The elements of output tensor with strided layout, that correspond to fully masked-out elements, have `` values. {reduction_args} {reduction_example}", - "type": "function", - "file_path": "pytorch\\torch\\masked\\_ops.py", - "ast_data": "FunctionDef name:std arguments arg:input type:Union[Tensor, MaskedTensor] arg:dim type:DimOrDims arg:unbiased type:Optional[bool] Return return:yes" - }, - { - "library": "coconut", - "name": "get_max_workers", - "source_code": "def get_max_workers(self): jobs = self.jobs if self.jobs is not None else base_default_jobs if jobs = = 'sys': return None else: return jobs", - "docstring": "Get the max_workers to use for creating ProcessPoolExecutor.", - "type": "method", - "file_path": "coconut\\coconut\\command\\command.py", - "ast_data": "FunctionDef name:get_max_workers arguments arg:self Assign If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_or_create_debug_dir", - "source_code": "def get_or_create_debug_dir(export_dir): debug_dir = get_debug_dir(export_dir) file_io.recursive_create_dir(debug_dir) return debug_dir", - "docstring": "Returns path to the debug sub-directory, creating if it does not exist.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\path_helpers.py", - "ast_data": "FunctionDef name:get_or_create_debug_dir arguments arg:export_dir Assign Call call:get_debug_dir Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "@_docstring.interpd def __init__(self, values, edges, *, orientation = 'vertical', baseline = 0, **kwargs): self.orientation = orientation self._edges = np.asarray(edges) self._values = np.asarray(values) self._baseline = np.asarray(baseline) if baseline is not None else None self._update_path() super().__init__(self._path, **kwargs)", - "docstring": "Parameters ---------- values : array-like The step heights. 
edges : array-like The edge positions, with `Patch` properties: %(Patch:kwdoc)s", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:values arg:edges kwarg:kwargs Assign Assign Call call:asarray Assign Call call:asarray Assign" - }, - { - "library": "tensorflow", - "name": "normalize_cluster_spec", - "source_code": "def normalize_cluster_spec(cluster_spec): if isinstance(cluster_spec, (dict, cluster_pb2.ClusterDef)): return server_lib.ClusterSpec(cluster_spec) elif not isinstance(cluster_spec, server_lib.ClusterSpec): raise ValueError(\"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a `tf.train.ClusterDef` object\") return cluster_spec", - "docstring": "Makes into a object. Args: cluster_spec: a dict, ClusterDef or ClusterSpec object specifying the cluster configurations. Returns: a object. Raises: ValueError: if is not a dict or a or a .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\distribute\\distribute_coordinator_utils.py", - "ast_data": "FunctionDef name:normalize_cluster_spec arguments arg:cluster_spec If Call call:isinstance Return return:yes If Raise raises:ValueError(\"`cluster_spec' should be dict or a `tf.train.ClusterSpec` or a `tf.train.ClusterDef` object\") Return return:yes" - }, - { - "library": "tensorflow", - "name": "cumprod", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def cumprod(x, axis = 0): return math_ops.cumprod(x, axis = axis)", - "docstring": "Cumulative product of the values in a tensor, alongside the specified axis. Args: x: A tensor or variable. axis: An integer, the axis to compute the product. Returns: A tensor of the cumulative product of values of along .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:cumprod arguments arg:x arg:axis Return return:yes" - }, - { - "library": "mongo", - "name": "update_is_writable", - "source_code": "async def update_is_writable(self, is_writable: Optional[bool]) -> None: self.is_writable = is_writable async with self.lock: for _socket in self.conns: _socket.update_is_writable(self.is_writable)", - "docstring": "Updates the is_writable attribute on all sockets currently in the Pool.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\pool.py", - "ast_data": "AsyncFunctionDef name:update_is_writable arguments arg:self arg:is_writable type:Optional[bool] Assign" - }, - { - "library": "tensorflow", - "name": "flush", - "source_code": "def flush(self): self._session.run(self._flush_op)", - "docstring": "Flushes the event file to disk. Call this method to make sure that all pending events have been written to disk.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\summary\\writer\\event_file_writer_v2.py", - "ast_data": "FunctionDef name:flush arguments arg:self" - }, - { - "library": "django", - "name": "truncatewords_html", - "source_code": "@register.filter(is_safe = True) @stringfilter def truncatewords_html(value, arg): try: length = int(arg) except ValueError: return value return Truncator(value).words(length, html = True, truncate = ' …')", - "docstring": "Truncate HTML after number of words. 
Preserve newlines in the HTML.", - "type": "function", - "file_path": "django\\django\\template\\defaultfilters.py", - "ast_data": "FunctionDef name:truncatewords_html arguments arg:value arg:arg Call call:filter Try Assign Call call:int ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "seaborn", - "name": "ci_to_errsize", - "source_code": "def ci_to_errsize(cis, heights): cis = np.atleast_2d(cis).reshape(2, -1) heights = np.atleast_1d(heights) errsize = [] for i, (low, high) in enumerate(np.transpose(cis)): h = heights[i] elow = h - low ehigh = high - h errsize.append([elow, ehigh]) errsize = np.asarray(errsize).T return errsize", - "docstring": "Convert intervals to error arguments relative to plot heights. Parameters ---------- cis : 2 x n sequence sequence of confidence interval limits heights : n sequence sequence of plot heights Returns ------- errsize : 2 x n array sequence of error size relative to height values in correct format as argument for plt.bar", - "type": "function", - "file_path": "seaborn\\seaborn\\utils.py", - "ast_data": "FunctionDef name:ci_to_errsize arguments arg:cis arg:heights Assign Call call:reshape Assign Call call:atleast_1d Assign For Call call:enumerate Assign Assign Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "is_valid_ipv6_address", - "source_code": "def is_valid_ipv6_address(ip_addr): if isinstance(ip_addr, ipaddress.IPv6Address): return True try: _ipv6_address_from_str(ip_addr) except (TypeError, ValueError): return False return True", - "docstring": "Return whether the object is a valid IPv6 address.", - "type": "function", - "file_path": "django\\django\\utils\\ipv6.py", - "ast_data": "FunctionDef name:is_valid_ipv6_address arguments arg:ip_addr If Call call:isinstance Return return:yes Try ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "__setstate__", - "source_code": "def __setstate__(self, state): super().__setstate__(state) if hasattr(self, 'X_thresholds_') and hasattr(self, 'y_thresholds_'): self._build_f(self.X_thresholds_, self.y_thresholds_)", - "docstring": "Pickle-protocol - set state of the estimator. We need to rebuild the interpolation function.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\isotonic.py", - "ast_data": "FunctionDef name:__setstate__ arguments arg:self arg:state If BoolOp Call call:hasattr Call call:hasattr" - }, - { - "library": "pytorch", - "name": "register_module_forward_hook", - "source_code": "def register_module_forward_hook(hook: Callable[..., None], *, with_kwargs: bool = False, always_call: bool = False) -> RemovableHandle: handle = RemovableHandle(_global_forward_hooks, extra_dict = _global_forward_hooks_always_called) _global_forward_hooks[handle.id] = hook if with_kwargs: _global_forward_hooks_with_kwargs[handle.id] = True if always_call: _global_forward_hooks_always_called[handle.id] = True return handle", - "docstring": "Register a global forward hook for all the modules. .. warning :: This adds global state to the module and it is only intended for debugging/profiling purposes. The hook will be called every time after :func: has computed an output. It should have the following signature:: hook(module, input, output) -> None or modified output The input contains only the positional arguments given to the module. 
Keyword arguments won't be passed to the hooks and only to the `forwardtorch.utils.hooks.RemovableHandle`.", - "type": "function", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:register_module_forward_hook arguments arg:hook type:Callable[..., None] Assign Call call:RemovableHandle Assign If Assign If Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "driver_allocated_memory", - "source_code": "def driver_allocated_memory() -> int: return torch._C._mps_driverAllocatedMemory()", - "docstring": "Returns total GPU memory allocated by Metal driver for the process in bytes. .. note:: The returned size includes cached allocations in MPSAllocator pools as well as allocations from MPS/MPSGraph frameworks.", - "type": "function", - "file_path": "pytorch\\torch\\mps\\__init__.py", - "ast_data": "FunctionDef name:driver_allocated_memory arguments Return return:yes" - }, - { - "library": "pytorch", - "name": "visit_once", - "source_code": "def visit_once(self, thing: Any): idx = id(thing) if idx in self.visited: return False self.visited.add(idx) return True", - "docstring": "Return True on the first call to with thing, otherwise false", - "type": "method", - "file_path": "pytorch\\torch\\fx\\_symbolic_trace.py", - "ast_data": "FunctionDef name:visit_once arguments arg:self arg:thing type:Any Assign Call call:id If Compare op:In Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_flattened_names", - "source_code": "def get_flattened_names(feeds_or_fetches): lines = [] if isinstance(feeds_or_fetches, (list, tuple)): for item in feeds_or_fetches: lines.extend(get_flattened_names(item)) elif isinstance(feeds_or_fetches, dict): for key in feeds_or_fetches: lines.extend(get_flattened_names(feeds_or_fetches[key])) else: lines.append(get_graph_element_name(feeds_or_fetches)) return lines", - "docstring": "Get a flattened list of the names in run() call feeds or fetches. Args: feeds_or_fetches: Feeds or fetches of the call. It maybe a Tensor, an Operation or a Variable. It may also be nested lists, tuples or dicts. See doc of for more details. Returns: (list of str) A flattened list of fetch names from .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\common.py", - "ast_data": "FunctionDef name:get_flattened_names arguments arg:feeds_or_fetches Assign If Call call:isinstance For If Call call:isinstance For Return return:yes" - }, - { - "library": "tensorflow", - "name": "check_trace_mode", - "source_code": "@staticmethod def check_trace_mode(device_type, trace_mode): if trace_mode = = tensor_tracer_flags.TRACE_MODE_FULL_TENSOR_SUMMARY: if device_type ! = _DEVICE_TYPE_TPU: raise ValueError('Device_type \"%s\" is not yet supported for trace mode \"%s\"' % (device_type, trace_mode))", - "docstring": "Checks if the given trace mode work on the given device type. Args: device_type: Device type, TPU, GPU, CPU. trace_mode: Tensor tracer trace mode. 
Raises: ValueError: If the given trace mode is not supported for the device.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tensor_tracer.py", - "ast_data": "FunctionDef name:check_trace_mode arguments arg:device_type arg:trace_mode If Compare op:Eq If Compare op:NotEq Raise raises:ValueError('Device_type \"%s\" is not yet supported for trace mode \"%s\"' % (device_type, trace_mode))" - }, - { - "library": "pytorch", - "name": "load_constants", - "source_code": "def load_constants(self, constants_map: dict[str, torch.Tensor], *, check_full_update: bool, user_managed: bool = False) -> None: self.loader.load_constants(constants_map, False, check_full_update, user_managed)", - "docstring": "Given a mapping of constant fqns to tensors, load the constants into the model. You can use `` to get the list of constant fqns that are needed in the compiled model. Args: constants_map: A mapping of constant fqns to tensors. check_full_update: Whether to add check to see if all the constants are updated and have values.", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\package\\package.py", - "ast_data": "FunctionDef name:load_constants arguments arg:self arg:constants_map type:dict[str, torch.Tensor]" - }, - { - "library": "tensorflow", - "name": "set_tpu_core_ids", - "source_code": "def set_tpu_core_ids(self, mesh_name, tpu_core_ids): _pywrap_dtensor_device.SetTPUCoreIDs(self._device_info, mesh_name, tpu_core_ids)", - "docstring": "Sets the singleton global device ID-to-physical core ID map. Args: mesh_name: The name of a mesh. If empty, set the default mapping. tpu_core_ids: TPU core IDs sorted by TF task/device ordinal.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\dtensor\\python\\dtensor_device.py", - "ast_data": "FunctionDef name:set_tpu_core_ids arguments arg:self arg:mesh_name arg:tpu_core_ids" - }, - { - "library": "tensorflow", - "name": "list_local_devices", - "source_code": "def list_local_devices(session_config = None): def _convert(pb_str): m = device_attributes_pb2.DeviceAttributes() m.ParseFromString(pb_str) return m serialized_config = None if session_config is not None: serialized_config = session_config.SerializeToString() return [_convert(s) for s in _pywrap_device_lib.list_devices(serialized_config)]", - "docstring": "List the available devices available in the local process. Args: session_config: a session config proto or None to use the default config. 
Returns: A list of protocol buffers.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\client\\device_lib.py", - "ast_data": "FunctionDef name:list_local_devices arguments arg:session_config FunctionDef name:_convert arguments arg:pb_str Assign Call call:DeviceAttributes Return return:yes Assign If Compare op:IsNot Assign Call call:SerializeToString Return return:yes" - }, - { - "library": "pytorch", - "name": "create_handler", - "source_code": "def create_handler(store: Store, backend: RendezvousBackend, params: RendezvousParameters) -> DynamicRendezvousHandler: try: timeout = RendezvousTimeout(_get_timeout(params, 'join'), _get_timeout(params, 'last_call'), _get_timeout(params, 'close'), _get_timeout(params, 'heartbeat')) keep_alive_interval = params.get_as_int('keep_alive_interval', 5) if keep_alive_interval is None: raise TypeError(\"You passed 'keep_alive_interval = None' as a rendezvous configuration option\") keep_alive_max_attempt = params.get_as_int('keep_alive_max_attempt', 3) if keep_alive_max_attempt is None: raise TypeError(\"You passed 'keep_alive_max_attempt = None' as a rendezvous configuration option\") return DynamicRendezvousHandler.from_backend(params.run_id, store, backend, params.min_nodes, params.max_nodes, params.local_addr, timeout, keep_alive_interval = keep_alive_interval, keep_alive_max_attempt = keep_alive_max_attempt) except Exception as e: construct_and_record_rdzv_event(message = f'{type(e).__name__}: {str(e)}', run_id = params.run_id, node_state = NodeState.FAILED) raise", - "docstring": "Create a new :py:class: from the specified parameters. Args: store: The C10d store to return as part of the rendezvous. backend: The backend to use to hold the rendezvous state. +-------------------+------------------------------------------------------+ | Parameter | Description | +===================+======================================================+ | join_timeout | The total time, in seconds, within which the | | | rendezvous is expected to complete. Defaults to 600 | | | seconds. | +-------------------+------------------------------------------------------+ | last_call_timeout | An additional wait amount, in seconds, before | | | completing the rendezvous once the minimum number of | | | nodes has been reached. Defaults to 30 seconds. | +-------------------+------------------------------------------------------+ | close_timeout | The time, in seconds, within which the rendezvous is | | | expected to close after a call to | | | :py:meth: or | | | :py:meth:. Defaults to | | | 30 seconds. 
| +-------------------+------------------------------------------------------+ | heartbeat | The time, in seconds, within which a keep-alive | | | heartbeat is expected to complete | +-------------------+------------------------------------------------------+", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\elastic\\rendezvous\\dynamic_rendezvous.py", - "ast_data": "FunctionDef name:create_handler arguments arg:store type:Store arg:backend type:RendezvousBackend arg:params type:RendezvousParameters Try Assign Call call:RendezvousTimeout Assign Call call:get_as_int If Compare op:Is Raise raises:TypeError(\"You passed 'keep_alive_interval=None' as a rendezvous configuration option\") Assign Call call:get_as_int If Compare op:Is Raise raises:TypeError(\"You passed 'keep_alive_max_attempt=None' as a rendezvous configuration option\") Return return:yes ExceptHandler Raise" - }, - { - "library": "pytorch", - "name": "get_mask_mod", - "source_code": "def get_mask_mod(self, mask_mod: Optional[_mask_mod_signature]) -> _mask_mod_signature: if mask_mod is None: mask_mod = noop_mask def new_mask_mod(b: torch.Tensor, h: torch.Tensor, q_idx: torch.Tensor, physical_kv_idx: torch.Tensor): physical_kv_block = physical_kv_idx // self.page_size physical_kv_offset = physical_kv_idx % self.page_size logical_block_idx = self.physical_to_logical[b, physical_kv_block] logical_kv_idx = logical_block_idx * self.page_size + physical_kv_offset return torch.where(logical_block_idx > = 0, mask_mod(b, h, q_idx, logical_kv_idx), False) return new_mask_mod", - "docstring": "Converts a mask_mod based on mapping from the physical block index to the logical block index. Args: mask_mod (_mask_mod_signature): mask_mod based on the logical block index.", - "type": "method", - "file_path": "pytorch\\torch\\nn\\attention\\experimental\\_paged_attention.py", - "ast_data": "FunctionDef name:get_mask_mod arguments arg:self arg:mask_mod type:Optional[_mask_mod_signature] If Compare op:Is Assign FunctionDef name:new_mask_mod arguments arg:b type:torch.Tensor arg:h type:torch.Tensor arg:q_idx type:torch.Tensor arg:physical_kv_idx type:torch.Tensor Assign Assign Assign Assign Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "__init__", - "source_code": "def __init__(self, tensor_size, input_var, index1, index2, output): assert isinstance(input_var, TVar) assert isinstance(output, TVar) assert isinstance(index1, int) assert isinstance(index2, int) self.input_var = input_var self.tensor_size = tensor_size self.index1 = index1 self.index2 = index2 self.output = output", - "docstring": "Args: tensor_size: current tensor size input_var: variable to hold input index1: dimension 1 index2: dimension 2 output: output that stores result", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:tensor_size arg:input_var arg:index1 arg:index2 arg:output Assign Assign Assign Assign Assign" - }, - { - "library": "authlib", - "name": "create_bearer_token_validator", - "source_code": "def create_bearer_token_validator(session, token_model): from authlib.oauth2.rfc6750 import BearerTokenValidator class _BearerTokenValidator(BearerTokenValidator): def authenticate_token(self, token_string): q = session.query(token_model) return q.filter_by(access_token = token_string).first() return _BearerTokenValidator", - "docstring": "Create an bearer token validator class with SQLAlchemy session and 
token model. :param session: SQLAlchemy session :param token_model: Token model class", - "type": "function", - "file_path": "authlib\\authlib\\integrations\\sqla_oauth2\\functions.py", - "ast_data": "FunctionDef name:create_bearer_token_validator arguments arg:session arg:token_model ClassDef name:_BearerTokenValidator FunctionDef name:authenticate_token arguments arg:self arg:token_string Assign Call call:query Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "custom_train_test_split", - "source_code": "def custom_train_test_split(self, df, test_size = 0.2, val_size = 0.25, random_state = 42): exclude_columns = ['speedup', 'winner', 'target'] feature_columns = [col for col in df.columns if col not in exclude_columns and (not col.startswith(CHOICE_COL + '_'))] df['input_id'] = df.groupby(feature_columns).ngroup() unique_inputs = df['input_id'].unique() train_val_inputs, test_inputs = train_test_split(unique_inputs, test_size = test_size, random_state = random_state) train_inputs, val_inputs = train_test_split(train_val_inputs, test_size = val_size, random_state = random_state) train_mask = df['input_id'].isin(train_inputs) val_mask = df['input_id'].isin(val_inputs) test_mask = df['input_id'].isin(test_inputs) df_train = df[train_mask] df_val = df[val_mask] df_test = df[test_mask] df_train = df_train.drop('input_id', axis = 1) df_val = df_val.drop('input_id', axis = 1) df_test = df_test.drop('input_id', axis = 1) return (df_train, df_val, df_test, feature_columns)", - "docstring": "Splits the dataframe into train, val, and test sets. Also adds other datasets, specified by the user, to the train set. We need to be careful, because we want to make sure that rows with the same input but different choice are kept in the same set, e.g. Rows that looks like this input_1,choice1,... input_1,choice2,... should be in the same set.", - "type": "method", - "file_path": "pytorch\\torchgen\\_autoheuristic\\train_regression.py", - "ast_data": "FunctionDef name:custom_train_test_split arguments arg:self arg:df arg:test_size arg:val_size arg:random_state Assign Assign Assign Call call:ngroup Assign Call call:unique Assign Call call:train_test_split Assign Call call:train_test_split Assign Call call:isin Assign Call call:isin Assign Call call:isin Assign Assign Assign Assign Call call:drop Assign Call call:drop Assign Call call:drop Return return:yes" - }, - { - "library": "pytorch", - "name": "create_root", - "source_code": "@classmethod def create_root(cls) -> _ModuleMeta: return _ModuleMeta('', None, ('', None))", - "docstring": "Create an empty module meta representing root module.", - "type": "method", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\modularization.py", - "ast_data": "FunctionDef name:create_root arguments arg:cls Return return:yes" - }, - { - "library": "django", - "name": "time_format", - "source_code": "def time_format(value, format = None, use_l10n = None): return dateformat.time_format(value, get_format(format or 'TIME_FORMAT', use_l10n = use_l10n))", - "docstring": "Format a datetime.time object using a localizable format. 
If use_l10n is provided and is not None, it forces the value to be localized (or not), otherwise it's always localized.", - "type": "function", - "file_path": "django\\django\\utils\\formats.py", - "ast_data": "FunctionDef name:time_format arguments arg:value arg:format arg:use_l10n Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_cudnn_version", - "source_code": "def get_cudnn_version(): key = 'cudnn_ver' cmds = cmds_all[PLATFORM.lower()][key] out, err = run_shell_cmd(cmds[0]) if err and FLAGS.debug: print('Error in finding `cudnn.h`: \\n %s' % str(err)) if len(out.split(b' ')) > 1: cmd = cmds[0] + ' | ' + cmds[1] out_re, err_re = run_shell_cmd(cmd) if err_re and FLAGS.debug: print('Error in detecting cuDNN version: \\n %s' % str(err_re)) return out_re.strip(b'\\n') else: return", - "docstring": "Retrieves the version of cuDNN library detected. Returns: String that is the version of cuDNN library detected. e.g. '7.5.0'", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\config_detector.py", - "ast_data": "FunctionDef name:get_cudnn_version arguments Assign Assign Assign Call call:run_shell_cmd If BoolOp If Compare op:Gt Assign Assign Call call:run_shell_cmd If BoolOp Return return:yes Return return:no" - }, - { - "library": "matplotlib", - "name": "get_clip_path", - "source_code": "def get_clip_path(self): if self._clippath is not None: tpath, tr = self._clippath.get_transformed_path_and_affine() if np.all(np.isfinite(tpath.vertices)): return (tpath, tr) else: _log.warning('Ill-defined clip_path detected. Returning None.') return (None, None) return (None, None)", - "docstring": "Return the clip path in the form (path, transform), where path is a instance, and transform is an affine transform to apply to the path before clipping.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:get_clip_path arguments arg:self If Compare op:IsNot Assign Call call:get_transformed_path_and_affine If Call call:all Return return:yes Return return:yes Return return:yes" - }, - { - "library": "scipy", - "name": "integrate_kde", - "source_code": "def integrate_kde(self, other): if other.d ! = self.d: raise ValueError('KDEs are not the same dimensionality') if other.n < self.n: small = other large = self else: small = self large = other sum_cov = small.covariance + large.covariance sum_cov_chol = linalg.cho_factor(sum_cov) result = 0.0 for i in range(small.n): mean = small.dataset[:, i, newaxis] diff = large.dataset - mean tdiff = linalg.cho_solve(sum_cov_chol, diff) energies = np_vecdot(diff, tdiff, axis = 0) / 2.0 result + = np_vecdot(exp(-energies), large.weights, axis = 0) * small.weights[i] sqrt_det = np.prod(np.diagonal(sum_cov_chol[0])) norm_const = power(2 * pi, sum_cov.shape[0] / 2.0) * sqrt_det result / = norm_const return result", - "docstring": "Computes the integral of the product of this kernel density estimate with another. Parameters ---------- other : gaussian_kde instance The other kde. Returns ------- value : scalar The result of the integral. 
Raises ------ ValueError If the KDEs have different dimensionality.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_kde.py", - "ast_data": "FunctionDef name:integrate_kde arguments arg:self arg:other If Compare op:NotEq Raise raises:ValueError('KDEs are not the same dimensionality') If Compare op:Lt Assign Assign Assign Assign Assign Assign Call call:cho_factor Assign For Call call:range Assign Assign Assign Call call:cho_solve Assign Assign Call call:prod Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_arg_indices_of_inputs_to_log", - "source_code": "def get_arg_indices_of_inputs_to_log(node: Node) -> list[int]: if len(node.args) = = 0: return [] if node.op = = 'call_function' and (node.target in (torch.add, torch.ops.quantized.add, operator.add) or node.target in (torch.mul, torch.ops.quantized.mul, operator.mul)): result = [i for i in range(2) if type(node.args[i]) = = Node] return result return [0]", - "docstring": "Returns the indices of args of the node which we should attach loggers to, if input logging is enabled. For example, * for (x + y), returns [0, 1] * for (1 + y), returns [1] * for (x + 1), returns [0] * for (linear(x, w, b)) returns [0] * by default, returns [0]", - "type": "function", - "file_path": "pytorch\\torch\\ao\\ns\\fx\\utils.py", - "ast_data": "FunctionDef name:get_arg_indices_of_inputs_to_log arguments arg:node type:Node If Compare op:Eq Return return:yes If BoolOp Compare op:Eq BoolOp Compare op:In Compare op:In Assign Return return:yes Return return:yes" - }, - { - "library": "seaborn", - "name": "__init__", - "source_code": "def __init__(self, stat = 'count', bins = 'auto', binwidth = None, binrange = None, discrete = False, cumulative = False): stat_choices = ['count', 'frequency', 'density', 'probability', 'proportion', 'percent'] _check_argument('stat', stat_choices, stat) self.stat = stat self.bins = bins self.binwidth = binwidth self.binrange = binrange self.discrete = discrete self.cumulative = cumulative self.bin_kws = None", - "docstring": "Initialize the estimator with its parameters. Parameters ---------- stat : str Aggregate statistic to compute in each bin. - : show the number of observations in each bin - : show the number of observations divided by the bin width - or : normalize such that bar heights sum to 1 - : normalize such that bar heights sum to 100 - : normalize such that the total area of the histogram equals 1 bins : str, number, vector, or a pair of such values Generic bin parameter that can be the name of a reference rule, the number of bins, or the breaks of the bins. Passed to :func:. binwidth : number or pair of numbers Width of each bin, overrides `` such that bin edges cover integer values in the dataset. cumulative : bool If True, return the cumulative statistic.", - "type": "method", - "file_path": "seaborn\\seaborn\\_statistics.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:stat arg:bins arg:binwidth arg:binrange arg:discrete arg:cumulative Assign Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "Sequence", - "source_code": "class Sequence(object): @abstractmethod def __getitem__(self, index): raise NotImplementedError @abstractmethod def __len__(self): raise NotImplementedError def on_epoch_end(self): pass def __iter__(self): for item in (self[i] for i in range(len(self))): yield item", - "docstring": "Base object for fitting to a sequence of data, such as a dataset. Every must implement the and the methods. 
If you want to modify your dataset between epochs you may implement . The method should return a complete batch. Notes: are a safer way to do multiprocessing. This structure guarantees that the network will only train once on each sample per epoch which is not the case with generators. Examples:", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\utils\\data_utils.py", - "ast_data": "ClassDef name:Sequence FunctionDef name:__getitem__ arguments arg:self arg:index Raise raises:NotImplementedError FunctionDef name:__len__ arguments arg:self Raise raises:NotImplementedError FunctionDef name:on_epoch_end arguments arg:self FunctionDef name:__iter__ arguments arg:self For" - }, - { - "library": "scikit-learn", - "name": "decision_function", - "source_code": "@_available_if_base_estimator_has('decision_function') def decision_function(self, X): return self._get_predictions(X, output_method = 'decision_function')", - "docstring": "Evaluate the decision_function of the models in the chain. Parameters ---------- X : array-like of shape (n_samples, n_features) The input data. Returns ------- Y_decision : array-like of shape (n_samples, n_classes) Returns the decision function of the sample for each model in the chain.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\multioutput.py", - "ast_data": "FunctionDef name:decision_function arguments arg:self arg:X Call call:_available_if_base_estimator_has Return return:yes" - }, - { - "library": "scrapy", - "name": "__init__", - "source_code": "def __init__(self, uri: URI, settings: Settings, conn_lost_deferred: Deferred[list[BaseException]]) -> None: self._conn_lost_deferred: Deferred[list[BaseException]] = conn_lost_deferred config = H2Configuration(client_side = True, header_encoding = 'utf-8') self.conn = H2Connection(config = config) self._stream_id_generator = itertools.count(start = 1, step = 2) self.streams: dict[int, Stream] = {} self._pending_request_stream_pool: deque[Stream] = deque() self._conn_lost_errors: list[BaseException] = [] self.metadata: dict[str, Any] = {'certificate': None, 'ip_address': None, 'uri': uri, 'default_download_maxsize': settings.getint('DOWNLOAD_MAXSIZE'), 'default_download_warnsize': settings.getint('DOWNLOAD_WARNSIZE'), 'active_streams': 0, 'settings_acknowledged': False}", - "docstring": "Arguments: uri -- URI of the base url to which HTTP/2 Connection will be made. uri is used to verify that incoming client requests have correct base URL. 
settings -- Scrapy project settings conn_lost_deferred -- Deferred fires with the reason: Failure to notify that connection was lost", - "type": "method", - "file_path": "scrapy\\scrapy\\core\\http2\\protocol.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:uri type:URI arg:settings type:Settings arg:conn_lost_deferred type:Deferred[list[BaseException]] Assign Call call:H2Configuration Assign Call call:H2Connection Assign Call call:count" - }, - { - "library": "pytorch", - "name": "dim", - "source_code": "@_onnx_symbolic('aten: : dim') def dim(g: jit_utils.GraphContext, self): shape = g.op('Shape', self) return g.op('Size', shape)", - "docstring": "Implement the dim functionality available for a pytorch tensor in ONNX", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\symbolic_opset9.py", - "ast_data": "FunctionDef name:dim arguments arg:g type:jit_utils.GraphContext arg:self Call call:_onnx_symbolic Assign Call call:op Return return:yes" - }, - { - "library": "pytorch", - "name": "fp16_bf16_reduction_math_sdp_allowed", - "source_code": "def fp16_bf16_reduction_math_sdp_allowed(): return torch._C._get_math_sdp_allow_fp16_bf16_reduction()", - "docstring": ".. warning:: This flag is beta and subject to change. Returns whether fp16/bf16 reduction in math scaled dot product attention is enabled or not.", - "type": "function", - "file_path": "pytorch\\torch\\backends\\cuda\\__init__.py", - "ast_data": "FunctionDef name:fp16_bf16_reduction_math_sdp_allowed arguments Return return:yes" - }, - { - "library": "django", - "name": "login_required", - "source_code": "def login_required(function = None, redirect_field_name = REDIRECT_FIELD_NAME, login_url = None): actual_decorator = user_passes_test(lambda u: u.is_authenticated, login_url = login_url, redirect_field_name = redirect_field_name) if function: return actual_decorator(function) return actual_decorator", - "docstring": "Decorator for views that checks that the user is logged in, redirecting to the log-in page if necessary.", - "type": "function", - "file_path": "django\\django\\contrib\\auth\\decorators.py", - "ast_data": "FunctionDef name:login_required arguments arg:function arg:redirect_field_name arg:login_url Assign Call call:user_passes_test If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "atomic_write_string_to_file", - "source_code": "def atomic_write_string_to_file(filename, contents, overwrite = True): if not has_atomic_move(filename): write_string_to_file(filename, contents) else: temp_pathname = filename + '.tmp' + uuid.uuid4().hex write_string_to_file(temp_pathname, contents) try: rename(temp_pathname, filename, overwrite) except errors.OpError: delete_file(temp_pathname) raise", - "docstring": "Writes to atomically. This means that when appears in the filesystem, it will contain all of . With write_string_to_file, it is possible for the file to appear in the filesystem with only partially written. Accomplished by writing to a temp file and then renaming it. 
Args: filename: string, pathname for a file contents: string, contents that need to be written to the file overwrite: boolean, if false it's an error for to be occupied by an existing file.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\lib\\io\\file_io.py", - "ast_data": "FunctionDef name:atomic_write_string_to_file arguments arg:filename arg:contents arg:overwrite If Assign Try ExceptHandler Raise" - }, - { - "library": "matplotlib", - "name": "view_limits", - "source_code": "def view_limits(self, dmin, dmax): if mpl.rcParams['axes.autolimit_mode'] = = 'round_numbers': vmin = self._edge.le(dmin - self._offset) * self._edge.step + self._offset vmax = self._edge.ge(dmax - self._offset) * self._edge.step + self._offset if vmin = = vmax: vmin - = 1 vmax + = 1 else: vmin = dmin vmax = dmax return mtransforms.nonsingular(vmin, vmax)", - "docstring": "Set the view limits to the nearest tick values that contain the data.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:view_limits arguments arg:self arg:dmin arg:dmax If Compare op:Eq Assign Assign If Compare op:Eq Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "verify_tensor_all_finite", - "source_code": "@tf_export(v1 = ['debugging.assert_all_finite', 'verify_tensor_all_finite']) @dispatch.add_dispatch_support @deprecation.deprecated_endpoints('verify_tensor_all_finite') def verify_tensor_all_finite(t = None, msg = None, name = None, x = None, message = None): x = deprecation.deprecated_argument_lookup('x', x, 't', t) message = deprecation.deprecated_argument_lookup('message', message, 'msg', msg) return verify_tensor_all_finite_v2(x, message, name)", - "docstring": "Assert that the tensor does not contain any NaN's or Inf's. Args: t: Tensor to check. msg: Message to log on failure. name: A name for this operation (optional). x: Alias for t. message: Alias for msg. 
Returns: Same tensor as .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\numerics.py", - "ast_data": "FunctionDef name:verify_tensor_all_finite arguments arg:t arg:msg arg:name arg:x arg:message Call call:tf_export Call call:deprecated_endpoints Assign Call call:deprecated_argument_lookup Assign Call call:deprecated_argument_lookup Return return:yes" - }, - { - "library": "scikit-learn", - "name": "setup_cache", - "source_code": "def setup_cache(self): clear_tmp() param_grid = list(itertools.product(*self.params)) for params in param_grid: if self.skip(params): continue estimator = self.make_estimator(params) X, _, y, _ = self.make_data(params) estimator.fit(X, y) est_path = get_estimator_path(self, Benchmark.save_dir, params, Benchmark.save_estimators) with est_path.open(mode = 'wb') as f: pickle.dump(estimator, f)", - "docstring": "Pickle a fitted estimator for all combinations of parameters", - "type": "method", - "file_path": "scikit-learn\\asv_benchmarks\\benchmarks\\common.py", - "ast_data": "FunctionDef name:setup_cache arguments arg:self Assign Call call:list For If Call call:skip Assign Call call:make_estimator Assign Call call:make_data Assign Call call:get_estimator_path With" - }, - { - "library": "matplotlib", - "name": "score_family", - "source_code": "def score_family(self, families, family2): if not isinstance(families, (list, tuple)): families = [families] elif len(families) = = 0: return 1.0 family2 = family2.lower() step = 1 / len(families) for i, family1 in enumerate(families): family1 = family1.lower() if family1 in font_family_aliases: options = [*map(str.lower, self._expand_aliases(family1))] if family2 in options: idx = options.index(family2) return (i + idx / len(options)) * step elif family1 = = family2: return i * step return 1.0", - "docstring": "Return a match score between the list of font families in *families* and the font family name *family2*. An exact match at the head of the list returns 0.0. A match further down the list will return between 0 and 1. 
No match will return 1.0.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\font_manager.py", - "ast_data": "FunctionDef name:score_family arguments arg:self arg:families arg:family2 If Assign If Compare op:Eq Return return:yes Assign Call call:lower Assign For Call call:enumerate Assign Call call:lower If Compare op:In Assign If Compare op:In Assign Call call:index Return return:yes If Compare op:Eq Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_logical_device_configuration", - "source_code": "def get_logical_device_configuration(self, dev): self._initialize_physical_devices() if dev not in self._physical_devices: raise ValueError('Unrecognized device: %s' % repr(dev)) return self._virtual_device_map.get(dev)", - "docstring": "Get the virtual device configuration for a PhysicalDevice.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\eager\\context.py", - "ast_data": "FunctionDef name:get_logical_device_configuration arguments arg:self arg:dev If Compare op:NotIn Raise raises:ValueError('Unrecognized device: %s' % repr(dev)) Return return:yes" - }, - { - "library": "matplotlib", - "name": "monochrome", - "source_code": "@property def monochrome(self): if not self._isinit: self._init() return self.N < = 1 or np.all(self._lut[0] = = self._lut[1: self.N])", - "docstring": "Return whether all colors in the colormap are identical.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\colors.py", - "ast_data": "FunctionDef name:monochrome arguments arg:self If Return return:yes" - }, - { - "library": "django", - "name": "walk_to_end", - "source_code": "def walk_to_end(ch, input_iter): if ch = = '(': nesting = 1 else: nesting = 0 for ch, escaped in input_iter: if escaped: continue elif ch = = '(': nesting + = 1 elif ch = = ')': if not nesting: return nesting - = 1", - "docstring": "The iterator is currently inside a capturing group. Walk to the close of this group, skipping over any nested groups and handling escaped parentheses correctly.", - "type": "function", - "file_path": "django\\django\\utils\\regex_helper.py", - "ast_data": "FunctionDef name:walk_to_end arguments arg:ch arg:input_iter If Compare op:Eq Assign Assign For If If Compare op:Eq If Compare op:Eq If Return return:no" - }, - { - "library": "tensorflow", - "name": "OutsideCompilationV2Context", - "source_code": "class OutsideCompilationV2Context(control_flow_ops.ControlFlowContext): def __init__(self, name: Text, is_map_outside_compilation = False): control_flow_ops.ControlFlowContext.__init__(self) self._name = name self._is_map_outside_compilation = is_map_outside_compilation def AddOp(self, op: ops.Operation) -> None: if self._outer_context: self._outer_context.AddOp(op) self._set_outside_compilation_attributes(op) def AddInnerOp(self, op: ops.Operation) -> None: if self._outer_context: self._outer_context.AddInnerOp(op) self._set_outside_compilation_attributes(op) def to_control_flow_context_def(self, context_def, export_scope = None): raise NotImplementedError def _set_outside_compilation_attributes(self, op: ops.Operation) -> None: op._set_attr(_OUTSIDE_COMPILATION_ATTR, attr_value_pb2.AttrValue(s = compat.as_bytes(self._name))) if self._is_map_outside_compilation: op._set_attr(_MAP_OUTSIDE_COMPILATION_ATTR, attr_value_pb2.AttrValue(b = True))", - "docstring": "The context for outside compilation in Tensorflow 2.0. 
Every op added in this context will be assigned an _xla_outside_compilation attribute.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_replication.py", - "ast_data": "ClassDef name:OutsideCompilationV2Context FunctionDef name:__init__ arguments arg:self arg:name type:Text arg:is_map_outside_compilation Assign Assign FunctionDef name:AddOp arguments arg:self arg:op type:ops.Operation If FunctionDef name:AddInnerOp arguments arg:self arg:op type:ops.Operation If FunctionDef name:to_control_flow_context_def arguments arg:self arg:context_def arg:export_scope Raise raises:NotImplementedError FunctionDef name:_set_outside_compilation_attributes arguments arg:self arg:op type:ops.Operation If" - }, - { - "library": "scrapy", - "name": "allowed", - "source_code": "@abstractmethod def allowed(self, url: str | bytes, user_agent: str | bytes) -> bool: pass", - "docstring": "Return ``. :param url: Absolute URL :type url: str or bytes :param user_agent: User agent :type user_agent: str or bytes", - "type": "method", - "file_path": "scrapy\\scrapy\\robotstxt.py", - "ast_data": "FunctionDef name:allowed arguments arg:self arg:url type:str | bytes arg:user_agent type:str | bytes" - }, - { - "library": "tensorflow", - "name": "variable_op_v2", - "source_code": "def variable_op_v2(shape, dtype, name = 'Variable', container = '', shared_name = ''): return gen_state_ops.variable_v2(shape = shape, dtype = dtype, name = name, container = container, shared_name = shared_name)", - "docstring": "Create a variable Operation. See also variables.Variable. Args: shape: The shape of the tensor managed by this variable dtype: The underlying type of the tensor values. name: optional name to use for the variable op. container: An optional string. Defaults to \"\". If non-empty, this variable is placed in the given container. Otherwise, a default container is used. shared_name: An optional string. Defaults to \"\". If non-empty, this variable is named in the given bucket with this shared_name. Otherwise, the node name is used instead. 
Returns: A variable tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\state_ops.py", - "ast_data": "FunctionDef name:variable_op_v2 arguments arg:shape arg:dtype arg:name arg:container arg:shared_name Return return:yes" - }, - { - "library": "pytorch", - "name": "transform_dimension", - "source_code": "def transform_dimension(dimension, counter, dimension_dict): if dimension = = Dyn: counter + = 1 return (D(0, z3.Int(counter)), counter) elif isinstance(dimension, int): return (D(1, dimension), counter) elif isinstance(dimension, DVar): if dimension.c in dimension_dict: return (D(z3.Int(dimension_dict[dimension.c]), z3.Int(dimension.c)), counter) else: counter + = 1 dimension_dict[dimension.c] = counter return (D(z3.Int(counter), z3.Int(dimension.c)), counter)", - "docstring": "Takes a dimension variable or a number and transforms it to a tuple according to our scheme Args: dimension: The dimension to be transformed counter: variable tracking Returns: tuple and the current counter", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\transform_to_z3.py", - "ast_data": "FunctionDef name:transform_dimension arguments arg:dimension arg:counter arg:dimension_dict If Compare op:Eq Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance If Compare op:In Return return:yes Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "prompt_loop_or_load_from_env", - "source_code": "def prompt_loop_or_load_from_env(environ_cp, var_name, var_default, ask_for_var, check_success, error_msg, suppress_default_error = False, resolve_symlinks = False, n_ask_attempts = _DEFAULT_PROMPT_ASK_ATTEMPTS): default = environ_cp.get(var_name) or var_default full_query = '%s [Default is %s]: ' % (ask_for_var, default) for _ in range(n_ask_attempts): val = get_from_env_or_user_or_default(environ_cp, var_name, full_query, default) if check_success(val): break if not suppress_default_error: print(error_msg % val) environ_cp[var_name] = '' else: raise UserInputError('Invalid %s setting was provided %d times in a row. Assuming to be a scripting mistake.' % (var_name, n_ask_attempts)) if resolve_symlinks: val = os.path.realpath(val) environ_cp[var_name] = val return val", - "docstring": "Loop over user prompts for an ENV param until receiving a valid response. For the env param var_name, read from the environment or verify user input until receiving valid input. When done, set var_name in the environ_cp to its new value. Args: environ_cp: (Dict) copy of the os.environ. var_name: (String) string for name of environment variable, e.g. \"TF_MYVAR\". var_default: (String) default value string. ask_for_var: (String) string for how to ask for user input. check_success: (Function) function that takes one argument and returns a boolean. Should return True if the value provided is considered valid. May contain a complex error message if error_msg does not provide enough information. In that case, set suppress_default_error to True. error_msg: (String) String with one and only one '%s'. Formatted with each invalid response upon check_success(input) failure. suppress_default_error: (Bool) Suppress the above error message in favor of one from the check_success function. resolve_symlinks: (Bool) Translate symbolic links into the real filepath. n_ask_attempts: (Integer) Number of times to query for valid input before raising an error and quitting. Returns: [String] The value of var_name after querying for input. 
Raises: UserInputError: if a query has been attempted n_ask_attempts times without success, assume that the user has made a scripting error, and will continue to provide invalid input. Raise the error to avoid infinitely looping.", - "type": "function", - "file_path": "tensorflow\\configure.py", - "ast_data": "FunctionDef name:prompt_loop_or_load_from_env arguments arg:environ_cp arg:var_name arg:var_default arg:ask_for_var arg:check_success arg:error_msg arg:suppress_default_error arg:resolve_symlinks arg:n_ask_attempts Assign BoolOp Call call:get Assign For Call call:range Assign Call call:get_from_env_or_user_or_default If Call call:check_success If Assign Raise raises:UserInputError('Invalid %s setting was provided %d times in a row. Assuming to be a scripting mistake.' % (var_name, n_ask_attempts)) If Assign Call call:realpath Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "end", - "source_code": "def end(self, session): pass", - "docstring": "Called at the end of session. The argument can be used in case the hook wants to run final ops, such as saving a last checkpoint. If raises exception other than OutOfRangeError or StopIteration then is not called. Note the difference between and behavior when raises OutOfRangeError or StopIteration. In that case is called but is not called. Args: session: A TensorFlow Session that will be soon closed.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\session_run_hook.py", - "ast_data": "FunctionDef name:end arguments arg:self arg:session" - }, - { - "library": "tensorflow", - "name": "is_layouts_same", - "source_code": "def is_layouts_same(self, embedding_layouts) -> bool: if self._checkpoint_layouts.keys() ! = embedding_layouts.keys(): raise ValueError('Layouts in checkpoint and embedding must have the same keys. found {} and {}'.format(self._checkpoint_layouts.keys(), embedding_layouts.keys())) for key, layout in self._checkpoint_layouts.items(): if not compare.ProtoEq(layout, embedding_layouts[key]): logging.info('Layouts do not match for %s this will require resharding; %s vs %s', key, layout, embedding_layouts[key]) return False return True", - "docstring": "Returns True if the all the embedding and checkpoint layouts are the same. Args: embedding_layouts: dict of layouts for embedding tables. Raises: ValueError if the embedding layouts and checkpoint layouts do not have the same keys. Returns: Bool representing if the embedding layouts match the layouts in checkpoint.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\tpu_embedding_v3_checkpoint_adapter.py", - "ast_data": "FunctionDef name:is_layouts_same arguments arg:self arg:embedding_layouts If Compare op:NotEq Raise raises:ValueError('Layouts in checkpoint and embedding must have the same keys. 
found {} and {}'.format(self._checkpoint_layouts.keys(), embedding_layouts.keys())) For Call call:items If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "in_defun", - "source_code": "def in_defun(): if context.executing_eagerly(): return False graph = ops.get_default_graph() while isinstance(graph, CondBranchFuncGraph) or isinstance(graph, WhileBodyFuncGraph) or isinstance(graph, WhileCondFuncGraph): graph = graph.outer_graph return isinstance(graph, FuncGraph)", - "docstring": "Returns if the current graph is, or is nested in, a defun.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_util_v2.py", - "ast_data": "FunctionDef name:in_defun arguments If Call call:executing_eagerly Return return:yes Assign Call call:get_default_graph While BoolOp Call call:isinstance Call call:isinstance Call call:isinstance Assign Return return:yes" - }, - { - "library": "django", - "name": "__init__", - "source_code": "def __init__(self, stdin, stdout, stderr, environ, **kwargs): try: content_length = int(environ.get('CONTENT_LENGTH')) except (ValueError, TypeError): content_length = 0 super().__init__(LimitedStream(stdin, content_length), stdout, stderr, environ, **kwargs)", - "docstring": "Use a LimitedStream so that unread request data will be ignored at the end of the request. WSGIRequest uses a LimitedStream but it shouldn't discard the data since the upstream servers usually do this. This fix applies only for testserver/runserver.", - "type": "method", - "file_path": "django\\django\\core\\servers\\basehttp.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:stdin arg:stdout arg:stderr arg:environ kwarg:kwargs Try Assign Call call:int ExceptHandler Assign" - }, - { - "library": "pytorch", - "name": "update_mask", - "source_code": "def update_mask(self, name, data, configs): mask = self.get_mask(name) sparse_config = configs['sparse_config'] features = configs['features'] reduce_fn = configs['reduce_fn'] mask_fn = configs['mask_fn'] if features is None: data = reduce_fn(data) mask.data = mask_fn(data, **sparse_config) else: for feature_idx in range(len(features)): data_feature = reduce_fn(data[feature_idx]) mask[feature_idx].data = mask_fn(data_feature, **sparse_config)", - "docstring": "Called for each registered layer and does the following- 1. apply reduce_fn on the aggregated activations 2. 
use mask_fn to compute the sparsification mask Note: the reduce_fn and mask_fn is called for each feature, dim over the data", - "type": "method", - "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", - "ast_data": "FunctionDef name:update_mask arguments arg:self arg:name arg:data arg:configs Assign Call call:get_mask Assign Assign Assign Assign If Compare op:Is Assign Call call:reduce_fn Assign Call call:mask_fn For Call call:range Assign Call call:reduce_fn Assign Call call:mask_fn" - }, - { - "library": "algorithms", - "name": "find_min_rotate", - "source_code": "def find_min_rotate(array): low = 0 high = len(array) - 1 while low < high: mid = (low + high) // 2 if array[mid] > array[high]: low = mid + 1 else: high = mid return array[low]", - "docstring": "Finds the minimum element in a sorted array that has been rotated.", - "type": "function", - "file_path": "algorithms\\algorithms\\search\\find_min_rotate.py", - "ast_data": "FunctionDef name:find_min_rotate arguments arg:array Assign Assign While Compare op:Lt Assign If Compare op:Gt Assign Assign Return return:yes" - }, - { - "library": "scipy", - "name": "logpdf", - "source_code": "def logpdf(self, X, mean = None, rowcov = 1, colcov = 1): dims, mean, rowcov, colcov = self._process_parameters(mean, rowcov, colcov) X = self._process_quantiles(X, dims) rowpsd = _PSD(rowcov, allow_singular = False) colpsd = _PSD(colcov, allow_singular = False) out = self._logpdf(dims, X, mean, rowpsd.U, rowpsd.log_pdet, colpsd.U, colpsd.log_pdet) return _squeeze_output(out)", - "docstring": "Log of the matrix normal probability density function. Parameters ---------- X : array_like Quantiles, with the last two axes of denoting the components. %(_matnorm_doc_default_callparams)s Returns ------- logpdf : ndarray Log of the probability density function evaluated at Notes ----- %(_matnorm_doc_callparams_note)s", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:logpdf arguments arg:self arg:X arg:mean arg:rowcov arg:colcov Assign Call call:_process_parameters Assign Call call:_process_quantiles Assign Call call:_PSD Assign Call call:_PSD Assign Call call:_logpdf Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, cluster_spec, master = '', task_type = None, task_id = None, environment = '', num_accelerators = None, rpc_layer = None): super(SimpleClusterResolver, self).__init__() self._task_type = task_type self._task_id = task_id self._environment = environment self._num_accelerators = num_accelerators self._rpc_layer = rpc_layer if not isinstance(cluster_spec, ClusterSpec): raise TypeError('cluster_spec must be a `tf.train.ClusterSpec`.') self._cluster_spec = cluster_spec if not isinstance(master, str): raise TypeError('master must be a string.') self._master = master", - "docstring": "Creates a SimpleClusterResolver from a ClusterSpec.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cluster_resolver\\cluster_resolver.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:cluster_spec arg:master arg:task_type arg:task_id arg:environment arg:num_accelerators arg:rpc_layer Assign Assign Assign Assign Assign If Raise raises:TypeError('cluster_spec must be a `tf.train.ClusterSpec`.') Assign If Raise raises:TypeError('master must be a string.') Assign" - }, - { - "library": "tensorflow", - "name": "limits", - "source_code": "@property def 
limits(self, clip_negative = True): if self.as_numpy_dtype in dtype_range: min, max = dtype_range[self.as_numpy_dtype] else: raise ValueError(str(self) + ' does not have defined limits.') if clip_negative: min = 0 return (min, max)", - "docstring": "Return intensity limits, i.e. (min, max) tuple, of the dtype. Args: clip_negative : bool, optional If True, clip the negative range (i.e. return 0 for min intensity) even if the image dtype allows negative values. Returns min, max : tuple Lower and upper intensity limits.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\dtypes.py", - "ast_data": "FunctionDef name:limits arguments arg:self arg:clip_negative If Compare op:In Assign Raise raises:ValueError(str(self) + ' does not have defined limits.') If Assign Return return:yes" - }, - { - "library": "pandas", - "name": "infer_compression", - "source_code": "@doc(compression_options = _shared_docs['compression_options'] % 'filepath_or_buffer') def infer_compression(filepath_or_buffer: FilePath | BaseBuffer, compression: str | None) -> str | None: if compression is None: return None if compression = = 'infer': if isinstance(filepath_or_buffer, str) and ': : ' in filepath_or_buffer: filepath_or_buffer = filepath_or_buffer.split(': : ')[0] filepath_or_buffer = stringify_path(filepath_or_buffer, convert_file_like = True) if not isinstance(filepath_or_buffer, str): return None for extension, compression in extension_to_compression.items(): if filepath_or_buffer.lower().endswith(extension): return compression return None if compression in _supported_compressions: return compression valid = ['infer', None] + sorted(_supported_compressions) msg = f'Unrecognized compression type: {compression}\\nValid compression types are {valid}' raise ValueError(msg)", - "docstring": "Get the compression method for filepath_or_buffer. If compression='infer', the inferred compression method is returned. Otherwise, the input compression method is returned unchanged, unless it's invalid, in which case an error is raised. Parameters ---------- filepath_or_buffer : str or file handle File path or object. {compression_options} .. versionchanged:: 1.4.0 Zstandard support. 
Returns ------- string or None Raises ------ ValueError on invalid compression specified.", - "type": "function", - "file_path": "pandas\\pandas\\io\\common.py", - "ast_data": "FunctionDef name:infer_compression arguments arg:filepath_or_buffer type:FilePath | BaseBuffer arg:compression type:str | None Call call:doc If Compare op:Is Return return:yes If Compare op:Eq If BoolOp Call call:isinstance Compare op:In Assign Assign Call call:stringify_path If Return return:yes For Call call:items If Call call:endswith Return return:yes Return return:yes If Compare op:In Return return:yes Assign Assign Raise raises:ValueError(msg)" - }, - { - "library": "matplotlib", - "name": "get_font_preamble", - "source_code": "@classmethod def get_font_preamble(cls): font_preamble, command = cls._get_font_preamble_and_command() return font_preamble", - "docstring": "Return a string containing font configuration for the tex preamble.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\texmanager.py", - "ast_data": "FunctionDef name:get_font_preamble arguments arg:cls Assign Call call:_get_font_preamble_and_command Return return:yes" - }, - { - "library": "tensorflow", - "name": "load_model", - "source_code": "def load_model(filepath, custom_objects = None, compile = True, options = None): with generic_utils.SharedObjectLoadingScope(): with generic_utils.CustomObjectScope(custom_objects or {}): with load_context.load_context(options): if h5py is not None and (isinstance(filepath, h5py.File) or h5py.is_hdf5(filepath)): return hdf5_format.load_model_from_hdf5(filepath, custom_objects, compile) filepath = path_to_string(filepath) if isinstance(filepath, str): return saved_model_load.load(filepath, compile, options) raise IOError('Unable to load model. Filepath is not an hdf5 file (or h5py is not available) or SavedModel.')", - "docstring": "Loads a model saved via . Usage: >>> model = tf.keras.Sequential([ ... tf.keras.layers.Dense(5, input_shape=(3,)), ... tf.keras.layers.Softmax()]) >>> model.save('/tmp/model') >>> loaded_model = tf.keras.models.load_model('/tmp/model') >>> x = tf.random.uniform((10, 3)) >>> assert np.allclose(model.predict(x), loaded_model.predict(x)) Note that the model weights may have different scoped names after being loaded. Scoped names include the model/layer names, such as . It is recommended that you use the layer properties to access specific variables, e.g. . Args: filepath: One of the following: - String or object, path to the saved model - object from which to load the model custom_objects: Optional dictionary mapping names (strings) to custom classes or functions to be considered during deserialization. compile: Boolean, whether to compile the model after loading. options: Optional object that specifies options for loading from SavedModel. Returns: A Keras model instance. If the original model was compiled, and saved with the optimizer, then the returned model will be compiled. Otherwise, the model will be left uncompiled. In the case that an uncompiled model is returned, a warning is displayed if the argument is set to . Raises: ImportError: if loading from an hdf5 file and h5py is not available. 
IOError: In case of an invalid savefile.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\save.py", - "ast_data": "FunctionDef name:load_model arguments arg:filepath arg:custom_objects arg:compile arg:options With With With If BoolOp Compare op:IsNot BoolOp Call call:isinstance Call call:is_hdf5 Return return:yes Assign Call call:path_to_string If Call call:isinstance Return return:yes Raise raises:IOError('Unable to load model. Filepath is not an hdf5 file (or h5py is not available) or SavedModel.')" - }, - { - "library": "pytorch", - "name": "track_dynamism_across_examples", - "source_code": "def track_dynamism_across_examples(example_inputs: list[Any]) -> dict[Any, Any]: tracking: dict[KeyPath, tuple[list[set[Any]], bool]] = {} for ex in example_inputs: if 'self' in ex and isinstance(ex['self'], torch.nn.Module): ex['self'] = module_to_nested_dict(ex['self']) leaves_with_paths, _ = tree_flatten_with_path(ex) for key_path, value in leaves_with_paths: if not isinstance(value, (int, float, torch.Tensor)): continue if isinstance(value, torch.Tensor): shape: tuple[int | float, ...] = tuple(value.shape) is_tensor = True else: shape = (value,) is_tensor = False if key_path not in tracking: tracking[key_path] = ([set() for _ in range(len(shape))], is_tensor) else: dim_sets, flag = tracking[key_path] if flag ! = is_tensor: pass while len(dim_sets) < len(shape): dim_sets.append(set()) for i, dim in enumerate(shape): tracking[key_path][0][i].add(dim) output: dict[Any, Any] = {} for key_path, (dim_sets, _is_tensor) in tracking.items(): final_dyn = tuple((len(s) > 1 for s in dim_sets)) key_str = 'L' + ''.join((f'{str(k)}' for k in key_path)) key = key_path[0].key if key not in output: output[key] = {} output[key][key_str] = final_dyn return output", - "docstring": "This function analyzes a list of example inputs to determine the dynamism of their shapes. It tracks whether the dimensions of tensors or non-tensor values change across different examples. The function returns a dictionary where each key represents a path to a value in the input examples, and the corresponding value is a tuple indicating which dimensions are dynamic (i.e., change across examples). This helps in understanding how the structure of data varies across different instances.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\_dynamism.py", - "ast_data": "FunctionDef name:track_dynamism_across_examples arguments arg:example_inputs type:list[Any] For If BoolOp Compare op:In Call call:isinstance Assign Call call:module_to_nested_dict Assign Call call:tree_flatten_with_path For If If Call call:isinstance Assign Assign Assign If Compare op:NotIn Assign Assign If Compare op:NotEq While Compare op:Lt For Call call:enumerate For Call call:items Assign Call call:tuple Assign Assign If Compare op:NotIn Assign Assign Return return:yes" - }, - { - "library": "feincms", - "name": "for_request", - "source_code": "def for_request(self, request, raise404 = False, best_match = False, path = None): if not hasattr(request, '_feincms_page'): path = path or request.path_info or request.path if best_match: request._feincms_page = self.best_match_for_path(path, raise404 = raise404) else: request._feincms_page = self.page_for_path(path, raise404 = raise404) return request._feincms_page", - "docstring": "Return a page for the request Does not hit the database more than once for the same request. 
Examples:: Page.objects.for_request(request, raise404=True, best_match=False) Defaults to raising a `` exception if no exact match could be determined.", - "type": "method", - "file_path": "feincms\\feincms\\module\\page\\models.py", - "ast_data": "FunctionDef name:for_request arguments arg:self arg:request arg:raise404 arg:best_match arg:path If Assign BoolOp If Assign Call call:best_match_for_path Assign Call call:page_for_path Return return:yes" - }, - { - "library": "mongo", - "name": "open_download_stream", - "source_code": "async def open_download_stream(self, file_id: Any, session: Optional[AsyncClientSession] = None) -> AsyncGridOut: gout = AsyncGridOut(self._collection, file_id, session = session) await gout.open() return gout", - "docstring": "Opens a Stream from which the application can read the contents of the stored file specified by file_id. For example:: my_db = MongoClient().test fs = GridFSBucket(my_db) # get _id of file to read. file_id = fs.upload_from_stream(\"test_file\", \"data I want to store!\") grid_out = fs.open_download_stream(file_id) contents = grid_out.read() Returns an instance of :class:. Raises :exc: if no file with file_id exists. :param file_id: The _id of the file to be downloaded. :param session: a :class: .. versionchanged:: 3.6 Added `` parameter.", - "type": "method", - "file_path": "mongo\\gridfs\\asynchronous\\grid_file.py", - "ast_data": "AsyncFunctionDef name:open_download_stream arguments arg:self arg:file_id type:Any arg:session type:Optional[AsyncClientSession] Assign Call call:AsyncGridOut Return return:yes" - }, - { - "library": "numpy", - "name": "split_by_unquoted", - "source_code": "def split_by_unquoted(line, characters): assert not set('\"\\'') & set(characters), 'cannot split by unquoted quotes' r = re.compile('\\\\A(?P({single_quoted}|{double_quoted}|{not_quoted})*)(?P{char}.*)\\\\Z'.format(not_quoted = f\"\"\"[^\"'{re.escape(characters)}]\"\"\", char = f'[{re.escape(characters)}]', single_quoted = \"('([^'\\\\\\\\]|(\\\\\\\\.))*')\", double_quoted = '(\"([^\"\\\\\\\\]|(\\\\\\\\.))*\")')) m = r.match(line) if m: d = m.groupdict() return (d['before'], d['after']) return (line, '')", - "docstring": "Splits the line into (line[:i], line[i:]), where i is the index of first occurrence of one of the characters not within quotes, or len(line) if no such index exists", - "type": "function", - "file_path": "numpy\\numpy\\f2py\\crackfortran.py", - "ast_data": "FunctionDef name:split_by_unquoted arguments arg:line arg:characters Assign Call call:compile Assign Call call:match If Assign Call call:groupdict Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "load_state_dict", - "source_code": "def load_state_dict(self, state_dict: dict[str, Any]) -> None: state = state_dict['state'] data_groups, defaults = (state_dict['data_groups'], state_dict['defaults']) self.__set_state__({'state': state, 'data_groups': data_groups, 'defaults': defaults})", - "docstring": "The load_state_dict() restores the state of the sparsifier based on the state_dict Args: * state_dict - the dictionary that to which the current sparsifier needs to be restored to", - "type": "method", - "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", - "ast_data": "FunctionDef name:load_state_dict arguments arg:self arg:state_dict type:dict[str, Any] Assign Assign" - }, - { - "library": "pytorch", - "name": "set_backend_pattern_config", - "source_code": "def set_backend_pattern_config(self, config: 
BackendPatternConfig) -> BackendConfig: pattern_complex_format = torch.ao.quantization.backend_config.utils._get_pattern_in_reversed_nested_tuple_format(config) self._pattern_complex_format_to_config[pattern_complex_format] = config return self", - "docstring": "Set the config for an pattern that can be run on the target backend. This overrides any existing config for the given pattern.", - "type": "method", - "file_path": "pytorch\\torch\\ao\\quantization\\backend_config\\backend_config.py", - "ast_data": "FunctionDef name:set_backend_pattern_config arguments arg:self arg:config type:BackendPatternConfig Assign Call call:_get_pattern_in_reversed_nested_tuple_format Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "tree_iter", - "source_code": "def tree_iter(tree: PyTree, is_leaf: Optional[Callable[[PyTree], bool]] = None) -> Iterable[Any]: return optree.tree_iter(tree, is_leaf = is_leaf, none_is_leaf = True, namespace = 'torch')", - "docstring": "Get an iterator over the leaves of a pytree. See also :func:. >>> tree = {\"b\": (2, [3, 4]), \"a\": 1, \"c\": None, \"d\": 5} >>> list(tree_iter(tree)) [2, 3, 4, 1, None, 5] >>> list(tree_iter(1)) [1] >>> list(tree_iter(None)) [None] Args: tree (pytree): A pytree to flatten. is_leaf (callable, optional): An extra leaf predicate function that will be called at each flattening step. The function should have a single argument with signature `True`, the whole subtree being treated as a leaf. Otherwise, the default pytree registry will be used to determine a node is a leaf or not. If the function is not specified, the default pytree registry will be used. Returns: An iterator over the leaf values.", - "type": "function", - "file_path": "pytorch\\torch\\utils\\_cxx_pytree.py", - "ast_data": "FunctionDef name:tree_iter arguments arg:tree type:PyTree arg:is_leaf type:Optional[Callable[[PyTree], bool]] Return return:yes" - }, - { - "library": "pytorch", - "name": "localize_nodes", - "source_code": "def localize_nodes(self, nodes: list[ir.IRNode], rewrite_index: Callable[['LocalizeBufferHandler', sympy.Expr, str], sympy.Expr] = rewrite_index_for_nodes) -> list[ir.IRNode]: assert len(nodes) > 0 def wrap_inner_fn_for_node(node: ir.IRNode): loops = node.data if isinstance(node, ir.ComputedBuffer) else node assert isinstance(loops, ir.Loops) new_inner_fn = self.localize_function(loops.inner_fn, rewrite_index) new_loops = dataclasses.replace(loops, inner_fn = new_inner_fn) if isinstance(node, ir.ComputedBuffer): new_node = ir.ComputedBuffer(name = node.get_name(), layout = node.get_layout(), data = new_loops) else: new_node = new_loops return new_node return [wrap_inner_fn_for_node(node) for node in nodes]", - "docstring": "Given and registered in current though the method of , localizes the to for the given and returns a new list of IR nodes that work on instead of , i.e., all the loads and stores are redirected to . This helps the fused loops to work on smaller-sized local buffers for better data locality. 
The the data access of is assumed to be contiguous with the same order as the .", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\codegen\\cpp_utils.py", - "ast_data": "FunctionDef name:localize_nodes arguments arg:self arg:nodes type:list[ir.IRNode] arg:rewrite_index type:Callable[['LocalizeBufferHandler', sympy.Expr, str], sympy.Expr] FunctionDef name:wrap_inner_fn_for_node arguments arg:node type:ir.IRNode Assign Assign Call call:localize_function Assign Call call:replace If Call call:isinstance Assign Call call:ComputedBuffer Assign Return return:yes Return return:yes" - }, - { - "library": "django", - "name": "do_static", - "source_code": "@register.tag('static') def do_static(parser, token): return StaticNode.handle_token(parser, token)", - "docstring": "Join the given path with the STATIC_URL setting. Usage:: {% static path [as varname] %} Examples:: {% static \"myapp/css/base.css\" %} {% static variable_with_path %} {% static \"myapp/css/base.css\" as admin_base_css %} {% static variable_with_path as varname %}", - "type": "function", - "file_path": "django\\django\\templatetags\\static.py", - "ast_data": "FunctionDef name:do_static arguments arg:parser arg:token Call call:tag Return return:yes" - }, - { - "library": "pytorch", - "name": "get_signature_for_torch_op", - "source_code": "@compatibility(is_backward_compatible = False) def get_signature_for_torch_op(op: Callable, return_schemas: bool = False): if isinstance(op, OpOverload): schemas = [op._schema] elif isinstance(op, OpOverloadPacket): schemas = [getattr(op, overload)._schema for overload in op.overloads()] else: override = _manual_overrides.get(op) if override: return (override, None) if return_schemas else None aten_fn = torch.jit._builtins._find_builtin(op) if aten_fn is None: return (None, None) if return_schemas else None schemas = torch._C._jit_get_schemas_for_operator(aten_fn) signatures = [_torchscript_schema_to_signature(schema) for schema in schemas] return (signatures, schemas) if return_schemas else signatures", - "docstring": "Given an operator on the namespace, return a list of objects corresponding to the overloads of that op.. May return if a signature could not be retrieved. Args: op (Callable): An operator on the namespace to look up a signature for Returns: Optional[List[inspect.Signature]]: A list of signatures for the overloads of this operator, or None if the operator signatures could not be retrieved. If return_schemas=True, returns a tuple containing the optional Python signatures and the optional TorchScript Function signature", - "type": "function", - "file_path": "pytorch\\torch\\fx\\operator_schemas.py", - "ast_data": "FunctionDef name:get_signature_for_torch_op arguments arg:op type:Callable arg:return_schemas type:bool Call call:compatibility If Call call:isinstance Assign If Call call:isinstance Assign Assign Call call:get If Return return:yes Assign Call call:_find_builtin If Compare op:Is Return return:yes Assign Call call:_jit_get_schemas_for_operator Assign Return return:yes" - }, - { - "library": "pandas", - "name": "construct_array_type", - "source_code": "@classmethod def construct_array_type(cls) -> type_t[NumpyExtensionArray]: from pandas.core.arrays import NumpyExtensionArray return NumpyExtensionArray", - "docstring": "Return the array type associated with this dtype. 
Returns ------- type", - "type": "method", - "file_path": "pandas\\pandas\\core\\dtypes\\dtypes.py", - "ast_data": "FunctionDef name:construct_array_type arguments arg:cls Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, funcs, trackable_obj = None): super(TFLiteFrozenGraphConverterV2, self).__init__() self._funcs = funcs self._trackable_obj = trackable_obj self.experimental_lower_to_saved_model = True", - "docstring": "Constructor for TFLiteConverter. Args: funcs: List of TensorFlow ConcreteFunctions. The list should not contain duplicate elements. trackable_obj: tf.AutoTrackable object associated with . A reference to this object needs to be maintained so that Variables do not get garbage collected since functions have a weak reference to Variables. This is only required when the tf.AutoTrackable object is not maintained by the user (e.g. ).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\lite\\python\\lite.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:funcs arg:trackable_obj Assign Assign Assign" - }, - { - "library": "seaborn", - "name": "__init__", - "source_code": "def __init__(self, estimator, errorbar = None, **boot_kws): if estimator ! = 'mean': raise ValueError(f\"Weighted estimator must be 'mean', not {estimator!r}.\") self.estimator = estimator method, level = _validate_errorbar_arg(errorbar) if method is not None and method ! = 'ci': raise ValueError(f\"Error bar method must be 'ci', not {method!r}.\") self.error_method = method self.error_level = level self.boot_kws = boot_kws", - "docstring": "Data aggregator that produces a weighted estimate and error bar interval. Parameters ---------- estimator : string Function (or method name) that maps a vector to a scalar. Currently supports only \"mean\". errorbar : string or (string, number) tuple Name of errorbar method or a tuple with a method name and a level parameter. Currently the only supported method is \"ci\". boot_kws Additional keywords are passed to bootstrap when error_method is \"ci\".", - "type": "method", - "file_path": "seaborn\\seaborn\\_statistics.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:estimator arg:errorbar kwarg:boot_kws If Compare op:NotEq Raise raises:ValueError(f\"Weighted estimator must be 'mean', not {estimator!r}.\") Assign Assign Call call:_validate_errorbar_arg If BoolOp Compare op:IsNot Compare op:NotEq Raise raises:ValueError(f\"Error bar method must be 'ci', not {method!r}.\") Assign Assign Assign" - }, - { - "library": "tensorflow", - "name": "grad_sync", - "source_code": "@property def grad_sync(self): if self._grad_sync is None: with ops.control_dependencies(None): self._grad_sync = control_flow_ops.control_trigger(name = 'b_sync') self._grad_sync._set_control_flow_context(self._grad_context) self._grad_index.op._add_control_input(self._grad_sync) if self._grad_context.outer_context: self._grad_context.outer_context.AddInnerOp(self._grad_sync) return self._grad_sync", - "docstring": "A control trigger node for synchronization in the grad loop. 
One main use is to keep the pop ops of a stack executed in the iteration order.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", - "ast_data": "FunctionDef name:grad_sync arguments arg:self If Compare op:Is With Assign Call call:control_trigger If Return return:yes" - }, - { - "library": "django", - "name": "fields", - "source_code": "@property def fields(self): return [force_str(capi.get_field_name(capi.get_field_defn(self._ldefn, i)), self._ds.encoding, strings_only = True) for i in range(self.num_fields)]", - "docstring": "Return a list of string names corresponding to each of the Fields available in this Layer.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\layer.py", - "ast_data": "FunctionDef name:fields arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "reorder_levels", - "source_code": "def reorder_levels(self, order) -> MultiIndex: order = [self._get_level_number(i) for i in order] result = self._reorder_ilevels(order) return result", - "docstring": "Rearrange levels using input order. May not drop or duplicate levels. is useful when you need to change the order of levels in a MultiIndex, such as when reordering levels for hierarchical indexing. It maintains the integrity of the MultiIndex, ensuring that all existing levels are present and no levels are duplicated. This method is helpful for aligning the index structure with other data structures or for optimizing the order for specific data operations. Parameters ---------- order : list of int or list of str List representing new level order. Reference level by number (position) or by key (label). Returns ------- MultiIndex A new MultiIndex with levels rearranged according to the specified order. See Also -------- MultiIndex.swaplevel : Swap two levels of the MultiIndex. MultiIndex.set_names : Set names for the MultiIndex levels. DataFrame.reorder_levels : Reorder levels in a DataFrame with a MultiIndex. Examples -------- >>> mi = pd.MultiIndex.from_arrays([[1, 2], [3, 4]], names=[\"x\", \"y\"]) >>> mi MultiIndex([(1, 3), (2, 4)], names=['x', 'y']) >>> mi.reorder_levels(order=[1, 0]) MultiIndex([(3, 1), (4, 2)], names=['y', 'x']) >>> mi.reorder_levels(order=[\"y\", \"x\"]) MultiIndex([(3, 1), (4, 2)], names=['y', 'x'])", - "type": "method", - "file_path": "pandas\\pandas\\core\\indexes\\multi.py", - "ast_data": "FunctionDef name:reorder_levels arguments arg:self arg:order Assign Assign Call call:_reorder_ilevels Return return:yes" - }, - { - "library": "pytorch", - "name": "is_available", - "source_code": "def is_available() -> bool: return hasattr(torch._C, '_c10d_init')", - "docstring": "Return `` for MacOS.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\__init__.py", - "ast_data": "FunctionDef name:is_available arguments Return return:yes" - }, - { - "library": "scipy", - "name": "delta_t", - "source_code": "@property def delta_t(self) -> float: return self.T * self.hop", - "docstring": "Time increment of STFT. The time increment = * represents the sample increment converted to time based on the sampling interval . See Also -------- delta_f: Width of the frequency bins of the STFT. hop: Hop size in signal samples for sliding window. t: Times of STFT for an input signal with samples. T: Sampling interval of input signal and window . 
ShortTimeFFT: Class this property belongs to", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_short_time_fft.py", - "ast_data": "FunctionDef name:delta_t arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "swapcase", - "source_code": "def swapcase(self): return asarray(swapcase(self))", - "docstring": "For each element in , return a copy of the string with uppercase characters converted to lowercase and vice versa. See Also -------- char.swapcase", - "type": "method", - "file_path": "numpy\\numpy\\_core\\defchararray.py", - "ast_data": "FunctionDef name:swapcase arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "logm", - "source_code": "@_apply_over_batch(('A', 2)) def logm(A, disp = _NoValue): if disp is _NoValue: disp = True else: warnings.warn('The `disp` argument is deprecated and will be removed in SciPy 1.18.0.', DeprecationWarning, stacklevel = 2) A = np.asarray(A) import scipy.linalg._matfuncs_inv_ssq F = scipy.linalg._matfuncs_inv_ssq._logm(A) F = _maybe_real(A, F) errtol = 1000 * eps with np.errstate(divide = 'ignore', invalid = 'ignore'): errest = norm(expm(F) - A, 1) / np.asarray(norm(A, 1), dtype = A.dtype).real[()] if disp: if not isfinite(errest) or errest > = errtol: message = f'logm result may be inaccurate, approximate err = {errest}' warnings.warn(message, RuntimeWarning, stacklevel = 2) return F else: return (F, errest)", - "docstring": "Compute matrix logarithm. The matrix logarithm is the inverse of expm: expm(logm()) == Parameters ---------- A : (N, N) array_like Matrix whose logarithm to evaluate disp : bool, optional Emit warning if error in the result is estimated large instead of returning estimated error. (Default: True) .. deprecated:: 1.16.0 The argument is deprecated and will be removed in SciPy 1.18.0. The previously returned error estimate can be computed as `A` errest : float (if disp == False) 1-norm of the estimated error, ||err||_1 / ||A||_1 References ---------- .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2012) \"Improved Inverse Scaling and Squaring Algorithms for the Matrix Logarithm.\" SIAM Journal on Scientific Computing, 34 (4). C152-C169. ISSN 1095-7197 .. [2] Nicholas J. Higham (2008) \"Functions of Matrices: Theory and Computation\" ISBN 978-0-898716-46-7 .. [3] Nicholas J. Higham and Lijing lin (2011) \"A Schur-Pade Algorithm for Fractional Powers of a Matrix.\" SIAM Journal on Matrix Analysis and Applications, 32 (3). pp. 1056-1078. 
ISSN 0895-4798 Examples -------- >>> import numpy as np >>> from scipy.linalg import logm, expm >>> a = np.array([[1.0, 3.0], [1.0, 4.0]]) >>> b = logm(a) >>> b array([[-1.02571087, 2.05142174], [ 0.68380725, 1.02571087]]) >>> expm(b) # Verify expm(logm(a)) returns a array([[ 1., 3.], [ 1., 4.]])", - "type": "function", - "file_path": "scipy\\scipy\\linalg\\_matfuncs.py", - "ast_data": "FunctionDef name:logm arguments arg:A arg:disp Call call:_apply_over_batch If Compare op:Is Assign Assign Call call:asarray Assign Call call:_logm Assign Call call:_maybe_real Assign With Assign If If BoolOp Compare op:GtE Assign Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_local_ip", - "source_code": "def get_local_ip(self): return _request_compute_metadata('instance/network-interfaces/0/ip')", - "docstring": "Return the local ip address of the Google Cloud VM the workload is running on.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\tpu\\client\\client.py", - "ast_data": "FunctionDef name:get_local_ip arguments arg:self Return return:yes" - }, - { - "library": "scikit-learn", - "name": "split", - "source_code": "def split(self, X, y, groups = None): if groups is not None: warnings.warn(f'The groups parameter is ignored by {self.__class__.__name__}', UserWarning) y = check_array(y, input_name = 'y', ensure_2d = False, dtype = None) return super().split(X, y, groups)", - "docstring": "Generate indices to split data into training and test set. Parameters ---------- X : array-like of shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. Note that providing `random_state` to an integer.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\model_selection\\_split.py", - "ast_data": "FunctionDef name:split arguments arg:self arg:X arg:y arg:groups If Compare op:IsNot Assign Call call:check_array Return return:yes" - }, - { - "library": "authlib", - "name": "register_grant", - "source_code": "def register_grant(self, grant_cls, extensions = None): if hasattr(grant_cls, 'check_authorization_endpoint'): self._authorization_grants.append((grant_cls, extensions)) if hasattr(grant_cls, 'check_token_endpoint'): self._token_grants.append((grant_cls, extensions))", - "docstring": "Register a grant class into the endpoint registry. Developers can implement the grants in `` and register with this method:: class AuthorizationCodeGrant(grants.AuthorizationCodeGrant): def authenticate_user(self, credential): # ... authorization_server.register_grant(AuthorizationCodeGrant) :param grant_cls: a grant class. 
:param extensions: extensions for the grant class.", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc6749\\authorization_server.py", - "ast_data": "FunctionDef name:register_grant arguments arg:self arg:grant_cls arg:extensions If Call call:hasattr If Call call:hasattr" - }, - { - "library": "mongo", - "name": "auto_encryption_opts", - "source_code": "@property def auto_encryption_opts(self) -> Optional[AutoEncryptionOpts]: return self.__auto_encryption_opts", - "docstring": "A :class: or None.", - "type": "method", - "file_path": "mongo\\pymongo\\client_options.py", - "ast_data": "FunctionDef name:auto_encryption_opts arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "forward_bytes_to_stdout", - "source_code": "def forward_bytes_to_stdout(val): if hasattr(sys.stdout, 'buffer'): sys.stdout.buffer.write(val) elif hasattr(sys.stdout, 'encoding'): sys.stdout.write(val.decode(sys.stdout.encoding)) else: sys.stdout.write(val.decode('utf8', errors = 'replace'))", - "docstring": "Forward bytes from a subprocess call to the console, without attempting to decode them. The assumption is that the subprocess call already returned bytes in a suitable encoding.", - "type": "function", - "file_path": "numpy\\numpy\\distutils\\exec_command.py", - "ast_data": "FunctionDef name:forward_bytes_to_stdout arguments arg:val If Call call:hasattr If Call call:hasattr" - }, - { - "library": "pytorch", - "name": "identify_mutated_tensors", - "source_code": "def identify_mutated_tensors(kernel: 'TritonKernelType', kwargs: dict[str, Any]) -> list[str]: ttir_module = None functions = None try: ttir_module, ordered_tensor_names = generate_ttir(kernel, kwargs) functions = ttir_to_functions(ttir_module) assert functions is not None kernel_name = next(iter(functions.keys())) assert kernel.fn.__name__ in kernel_name analyze_kernel_mutations.reset() mutations = analyze_kernel_mutations(functions, kernel_name, len(ordered_tensor_names)) return [ordered_tensor_names[i] for i, mutated in enumerate(mutations) if mutated] except Exception: log.warning('Encountered an exception in identify_mutated_tensors, assuming every input is mutated', exc_info = True) if ttir_module is not None: log.debug('TTIR: \\n%s', str(ttir_module)) if functions is not None: log.debug('functions: ') for name, fn in functions.items(): log.debug(' = = = \\t%s\\t = = = ', name) for ret, ops in fn.items(): log.debug('%s\\t = >\\t%s', ret, ops) return [key for key, value in kwargs.items() if isinstance(value, Tensor)]", - "docstring": "Given a triton kernel and the arguments for this kernel, this function 1) Retrieves the TTIR converted version of the kernel from Triton's API. 
2) Parses the TTIR and creates a control flow graph 3) Analyzes the graph to detect all input tensor mutations", "type": "function", "file_path": "pytorch\\torch\\_higher_order_ops\\triton_kernel_wrap.py", "ast_data": "FunctionDef name:identify_mutated_tensors arguments arg:kernel type:'TritonKernelType' arg:kwargs type:dict[str, Any] Assign Assign Try Assign Call call:generate_ttir Assign Call call:ttir_to_functions Assign Call call:next Assign Call call:analyze_kernel_mutations Return return:yes ExceptHandler If Compare op:IsNot If Compare op:IsNot For Call call:items For Call call:items Return return:yes" }, { "library": "scipy", "name": "rvs", "source_code": "def rvs(self, size = 1): size1d = tuple(np.atleast_1d(size)) N = np.prod(size1d) x = np.zeros(N) simulated, i = (0, 1) while simulated < N: k = N - simulated u1 = self._umax * self._rng.uniform(size = k) v1 = self._rng.uniform(self._vmin, self._vmax, size = k) rvs = v1 / u1 + self._c accept = u1 ** 2 <= self._pdf(rvs) num_accept = np.sum(accept) if num_accept > 0: x[simulated: simulated + num_accept] = rvs[accept] simulated += num_accept if simulated == 0 and i * N >= 50000: msg = f'Not a single random variate could be generated in {i * N} attempts. The ratio of uniforms method does not appear to work for the provided parameters. Please check the pdf and the bounds.' raise RuntimeError(msg) i += 1 return np.reshape(x, size1d)", "docstring": "Sampling of random variates Parameters ---------- size : int or tuple of ints, optional Number of random variates to be generated (default is 1). Returns ------- rvs : ndarray The random variates distributed according to the probability distribution defined by the pdf.", "type": "method", "file_path": "scipy\\scipy\\stats\\_sampling.py", "ast_data": "FunctionDef name:rvs arguments arg:self arg:size Assign Call call:tuple Assign Call call:prod Assign Call call:zeros Assign While Compare op:Lt Assign Assign Assign Call call:uniform Assign Assign Compare op:LtE Assign Call call:sum If Compare op:Gt Assign If BoolOp Compare op:Eq Compare op:GtE Assign Raise raises:RuntimeError(msg) Return return:yes" }, { "library": "scrapy", "name": "getint", "source_code": "def getint(self, name: _SettingsKeyT, default: int = 0) -> int: return int(self.get(name, default))", "docstring": "Get a setting value as an int.
:param name: the setting name :type name: str :param default: the value to return if no setting is found :type default: object", "type": "method", "file_path": "scrapy\\scrapy\\settings\\__init__.py", "ast_data": "FunctionDef name:getint arguments arg:self arg:name type:_SettingsKeyT arg:default type:int Return return:yes" }, { "library": "scipy", "name": "time_spherical_polygon_area_calculation", "source_code": "def time_spherical_polygon_area_calculation(self, num_points, ndim): self.sv.calculate_areas()", "docstring": "Time the area calculation in the Spherical Voronoi code.", "type": "method", "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", "ast_data": "FunctionDef name:time_spherical_polygon_area_calculation arguments arg:self arg:num_points arg:ndim" }, { "library": "pytorch", "name": "__repr__", "source_code": "def __repr__(self) -> str: return f'Shard(dim = {self.dim})'", "docstring": "machine readable representation of the Shard placement", "type": "method", "file_path": "pytorch\\torch\\distributed\\tensor\\placement_types.py", "ast_data": "FunctionDef name:__repr__ arguments arg:self Return return:yes" }, { "library": "algorithms", "name": "edmonds_karp", "source_code": "def edmonds_karp(capacity, source, sink): vertices = len(capacity) ret = 0 flow = [[0] * vertices for _ in range(vertices)] while True: tmp = 0 queue = Queue() visit = [False for _ in range(vertices)] par = [-1 for _ in range(vertices)] visit[source] = True queue.put((source, 1 << 63)) while queue.qsize(): front = queue.get() idx, current_flow = front if idx == sink: tmp = current_flow break for nxt in range(vertices): if not visit[nxt] and flow[idx][nxt] < capacity[idx][nxt]: visit[nxt] = True par[nxt] = idx queue.put((nxt, min(current_flow, capacity[idx][nxt] - flow[idx][nxt]))) if par[sink] == -1: break ret += tmp parent = par[sink] idx = sink while parent != -1: flow[parent][idx] += tmp flow[idx][parent] -= tmp idx = parent parent = par[parent] return ret", "docstring": "Computes maximum flow from source to sink using BFS. Time complexity : O(V*E^2) V is the number of vertices and E is the number of edges.", "type": "function", "file_path": "algorithms\\algorithms\\graph\\maximum_flow.py", "ast_data": "FunctionDef name:edmonds_karp arguments arg:capacity arg:source arg:sink Assign Call call:len Assign Assign While Assign Assign Call call:Queue Assign Assign Assign While Call call:qsize Assign Call call:get Assign If Compare op:Eq Assign For Call call:range If BoolOp Compare op:Lt Assign Assign If Compare op:Eq Assign Assign While Compare op:NotEq Assign Assign Return return:yes" }, { "library": "django", "name": "assemble_as_sql", "source_code": "def assemble_as_sql(self, fields, value_rows): if not value_rows: return ([], []) get_placeholders = [getattr(field, 'get_placeholder', None) for field in fields] rows_of_fields_as_sql = ((self.field_as_sql(field, get_placeholder, value) for field, get_placeholder, value in zip(fields, get_placeholders, row)) for row in value_rows) sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql) placeholder_rows, param_rows = zip(*sql_and_param_pair_rows) param_rows = [[p for ps in row for p in ps] for row in param_rows] return (placeholder_rows, param_rows)", "docstring": "Take a sequence of N fields and a sequence of M rows of values, and generate placeholder SQL and parameters for each field and value.
Return a pair containing: * a sequence of M rows of N SQL placeholder strings, and * a sequence of M rows of corresponding parameter values. Each placeholder string may contain any number of '%s' interpolation strings, and each parameter row will contain exactly as many params as the total number of '%s's in the corresponding placeholder row.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\compiler.py", - "ast_data": "FunctionDef name:assemble_as_sql arguments arg:self arg:fields arg:value_rows If Return return:yes Assign Assign Assign Assign Call call:zip Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "parent_nodes", - "source_code": "@property def parent_nodes(self): node_deps = [] for kt in self.keras_inputs: layer = kt._keras_history.layer node_index = kt._keras_history.node_index if layer is not None: node_deps.append(layer._inbound_nodes[node_index]) return node_deps", - "docstring": "Returns all the s whose output this node immediately depends on.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\node.py", - "ast_data": "FunctionDef name:parent_nodes arguments arg:self Assign For Assign Assign If Compare op:IsNot Return return:yes" - }, - { - "library": "pytorch", - "name": "get_compile_threads", - "source_code": "def get_compile_threads() -> int: if config.compile_threads is None: config.compile_threads = config.decide_compile_threads() return config.compile_threads", - "docstring": "Temporary for internal rollout. Assign config.compile_threads lazily and return it. TODO: remove after rollout.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\async_compile.py", - "ast_data": "FunctionDef name:get_compile_threads arguments If Compare op:Is Assign Call call:decide_compile_threads Return return:yes" - }, - { - "library": "tensorflow", - "name": "smart_cond", - "source_code": "def smart_cond(pred, true_fn = None, false_fn = None, name = None): if isinstance(pred, variables.Variable): return cond.cond(pred, true_fn = true_fn, false_fn = false_fn, name = name) return smart_module.smart_cond(pred, true_fn = true_fn, false_fn = false_fn, name = name)", - "docstring": "Return either if predicate is true else . If is a bool or has a constant value, we return either or , otherwise we use to dynamically route to both. Args: pred: A scalar determining whether to return the result of or . true_fn: The callable to be performed if pred is true. false_fn: The callable to be performed if pred is false. name: Optional name prefix when using . Returns: Tensors returned by the call to either or . Raises: TypeError: If or is not callable.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\layers\\utils.py", - "ast_data": "FunctionDef name:smart_cond arguments arg:pred arg:true_fn arg:false_fn arg:name If Call call:isinstance Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "CompilerWrapper", - "source_code": "class CompilerWrapper: def pre_compile(self, flat_fn, flat_args: list[Tensor], aot_config: AOTConfig, *, fw_metadata: ViewAndMutationMeta) -> tuple[Callable, list[Tensor], ViewAndMutationMeta]: return (flat_fn, flat_args, fw_metadata) def post_compile(self, compiled_fn, aot_config, *, runtime_metadata) -> Callable: return compiled_fn", - "docstring": "A wrapper around the inputs and outputs to the compiler_fn. We separate these into two parts: 1. The prologue, which edits the input to the compiler_fn(flat_fn, flat_args, etc) 2. 
The epilogue, which edits the outputs of the compiler_fn (compiled_fn, real arguments) Each wrapper below should be implemented as a CompilerWrapper, so that we can facilitate caching on the compiled output, and re-wrapping the output via epilogues. Extra metadata that is needed to compute pre or post compile can be passed in via attributes.", - "type": "class", - "file_path": "pytorch\\torch\\_functorch\\_aot_autograd\\runtime_wrappers.py", - "ast_data": "ClassDef name:CompilerWrapper FunctionDef name:pre_compile arguments arg:self arg:flat_fn arg:flat_args type:list[Tensor] arg:aot_config type:AOTConfig Return return:yes FunctionDef name:post_compile arguments arg:self arg:compiled_fn arg:aot_config Return return:yes" - }, - { - "library": "scipy", - "name": "mean", - "source_code": "def mean(self, df, scale): dim, df, scale = self._process_parameters(df, scale) out = self._mean(dim, df, scale) return _squeeze_output(out) if out is not None else out", - "docstring": "Mean of the inverse Wishart distribution. Only valid if the degrees of freedom are greater than the dimension of the scale matrix plus one. Parameters ---------- %(_doc_default_callparams)s Returns ------- mean : float or None The mean of the distribution", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_multivariate.py", - "ast_data": "FunctionDef name:mean arguments arg:self arg:df arg:scale Assign Call call:_process_parameters Assign Call call:_mean Return return:yes" - }, - { - "library": "pygame", - "name": "get_clip", - "source_code": "def get_clip(self): return self._clip", - "docstring": "get the area where drawing will occur LayeredDirty.get_clip(): return Rect", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:get_clip arguments arg:self Return return:yes" - }, - { - "library": "kornia", - "name": "x", - "source_code": "@property def x(self) -> Tensor: return self.keypoints[:, 0]", - "docstring": "Accesses the x coordinates of keypoints (along image width).", - "type": "method", - "file_path": "kornia\\kornia\\feature\\disk\\structs.py", - "ast_data": "FunctionDef name:x arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "np_asarray", - "source_code": "def np_asarray(values, dtype = None, order = None, copy = None): if np.lib.NumpyVersion(np.__version__) > = '2.0.0.dev0': if dtype is not None and np.issubdtype(dtype, np.number): return np.asarray(values, order = order, copy = copy).astype(dtype, copy = copy) else: return np.asarray(values, dtype = dtype, order = order, copy = copy) else: return np.asarray(values, dtype = dtype, order = order)", - "docstring": "Converts input values to a NumPy array. It will not make a copy. In NumPy 2.x and later, strict type casting can lead to errors when values overflow the specified dtype. This function addresses this by replacing direct np.array(..., dtype=...) calls with np.array(...).astype(...). This allows for intended overflows, aligning with the behavior of older NumPy versions. Args: values: Array_like objects. E.g., a python list, tuple, or an object whose __array__ method returns an array. dtype: The desired numpy data type for the array. order: {‘C’, ‘F’, ‘A’, ‘K’}. copy: bool. If True, then the object is copied. If None then the object is copied only if needed, i.e. if __array__ returns a copy, if obj is a nested sequence, or if a copy is needed to satisfy any of the other requirements (dtype, order, etc.). For False it raises a ValueError if a copy cannot be avoided. 
Returns: A NumPy array with the specified data type.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\numpy_compat.py", - "ast_data": "FunctionDef name:np_asarray arguments arg:values arg:dtype arg:order arg:copy If Compare op:GtE If BoolOp Compare op:IsNot Call call:issubdtype Return return:yes Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "starting_wall_time", - "source_code": "def starting_wall_time(self): return self._starting_wall_time", - "docstring": "Get the starting timestamp of the instrumented TensorFlow program. When there are multiple hosts (i.e., multiple tfdbg file sets), the earliest timestamp among the file sets is returned. It is assumed to be the job that starts first (e.g., the coordinator). Returns: Starting timestamp in seconds since the epoch, as a float.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_events_reader.py", - "ast_data": "FunctionDef name:starting_wall_time arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "op_dispatch_handler", - "source_code": "@traceback_utils.filter_traceback def op_dispatch_handler(*args, **kwargs): if api_dispatcher is not None: if iterable_params is not None: args, kwargs = replace_iterable_params(args, kwargs, iterable_params) result = api_dispatcher.Dispatch(args, kwargs) if result is not NotImplemented: return result try: return dispatch_target(*args, **kwargs) except (TypeError, ValueError): result = dispatch(op_dispatch_handler, args, kwargs) if result is not OpDispatcher.NOT_SUPPORTED: return result else: raise", - "docstring": "Call , performing dispatch when appropriate.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\dispatch.py", - "ast_data": "FunctionDef name:op_dispatch_handler arguments vararg:args kwarg:kwargs If Compare op:IsNot If Compare op:IsNot Assign Call call:replace_iterable_params Assign Call call:Dispatch If Compare op:IsNot Return return:yes Try Return return:yes ExceptHandler Assign Call call:dispatch If Compare op:IsNot Return return:yes Raise" - }, - { - "library": "authlib", - "name": "validate_token_endpoint", - "source_code": "def validate_token_endpoint(self): grant_types_supported = self.get('grant_types_supported') if grant_types_supported and len(grant_types_supported) = = 1 and (grant_types_supported[0] = = 'implicit'): return url = self.get('token_endpoint') if not url: raise ValueError('\"token_endpoint\" is required') if not is_secure_transport(url): raise ValueError('\"token_endpoint\" MUST use \"https\" scheme')", - "docstring": "URL of the authorization server's token endpoint [RFC6749]. 
This is REQUIRED unless only the implicit grant type is supported.", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc8414\\models.py", - "ast_data": "FunctionDef name:validate_token_endpoint arguments arg:self Assign Call call:get If BoolOp Compare op:Eq Compare op:Eq Return return:no Assign Call call:get If Raise raises:ValueError('\"token_endpoint\" is required') If Raise raises:ValueError('\"token_endpoint\" MUST use \"https\" scheme')" - }, - { - "library": "pytorch", - "name": "keyfilter", - "source_code": "def keyfilter(predicate, d, factory = dict): rv = factory() for k, v in d.items(): if predicate(k): rv[k] = v return rv", - "docstring": "Filter items in dictionary by key >>> iseven = lambda x: x % 2 == 0 >>> d = {1: 2, 2: 3, 3: 4, 4: 5} >>> keyfilter(iseven, d) {2: 3, 4: 5} See Also: valfilter itemfilter keymap", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\unification\\unification_tools.py", - "ast_data": "FunctionDef name:keyfilter arguments arg:predicate arg:d arg:factory Assign Call call:factory For Call call:items If Call call:predicate Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "tf_buffer", - "source_code": "@tf_contextlib.contextmanager def tf_buffer(data = None): if data: buf = c_api.TF_NewBufferFromString(compat.as_bytes(data)) else: buf = c_api.TF_NewBuffer() try: yield buf finally: c_api.TF_DeleteBuffer(buf)", - "docstring": "Context manager that creates and deletes TF_Buffer. Example usage: with tf_buffer() as buf: # get serialized graph def into buf ... proto_data = c_api.TF_GetBuffer(buf) graph_def.ParseFromString(compat.as_bytes(proto_data)) # buf has been deleted with tf_buffer(some_string) as buf: c_api.TF_SomeFunction(buf) # buf has been deleted Args: data: An optional , , or object. If not None, the yielded buffer will contain this data. Yields: Created TF_Buffer", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\framework\\c_api_util.py", - "ast_data": "FunctionDef name:tf_buffer arguments arg:data If Assign Call call:TF_NewBufferFromString Assign Call call:TF_NewBuffer Try" - }, - { - "library": "tensorflow", - "name": "collections", - "source_code": "@property def collections(self) -> list[str]: return list(self._collections)", - "docstring": "Returns the names of the collections known to this graph.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\ops.py", - "ast_data": "FunctionDef name:collections arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "__eq__", - "source_code": "def __eq__(self, other): return self._comparison(other, operator.eq)", - "docstring": "Check whether other equals self elementwise. When either of the elements is masked, the result is masked as well, but the underlying boolean data are still set, with self and other considered equal if both are masked, and unequal otherwise. For structured arrays, all fields are combined, with masked values ignored. The result is masked if all fields were masked, with self and other considered equal only if both were fully masked.", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:__eq__ arguments arg:self arg:other Return return:yes" - }, - { - "library": "tensorflow", - "name": "gradient_values_from_dump", - "source_code": "def gradient_values_from_dump(grad_debugger, x_tensor, dump): if dump.python_graph and grad_debugger.graph and (dump.python_graph ! 
= grad_debugger.graph): raise ValueError('This GradientsDebugger instance has a graph (%s) that differs from the graph of the DebugDumpDir object (%s).' % (grad_debugger.graph, dump.python_graph)) gradient_tensor = grad_debugger.gradient_tensor(x_tensor) node_name, output_slot = debug_graphs.parse_node_or_tensor_name(gradient_tensor.name) try: return dump.get_tensors(node_name, output_slot, 'DebugIdentity') except debug_data.WatchKeyDoesNotExistInDebugDumpDirError: return []", - "docstring": "Find gradient values from a object. Args: grad_debugger: the instance to be used. x_tensor: (, or ) The x-tensor object or its name. x-tensor refers to the independent , i.e., the tensor on the denominator of the differentiation. dump: A object. Returns: If this instance has the gradient tensor of registered: a list of representing the value of the gradient tensor from . The list could be empty, if the gradient tensor is not executed in the call that generated the . The list could also contain multiple values of the gradient tensor, e.g., if gradient tensor is computed repeatedly in a during the run that generated the . Raises: LookupError: If this instance does not have the gradient tensor of registered. ValueError: If this has a object that does not match the object of the . TypeError: If is not a , or .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\debug\\lib\\debug_gradients.py", - "ast_data": "FunctionDef name:gradient_values_from_dump arguments arg:grad_debugger arg:x_tensor arg:dump If BoolOp Compare op:NotEq Raise raises:ValueError('This GradientsDebugger instance has a graph (%s) that differs from the graph of the DebugDumpDir object (%s).' % (grad_debugger.graph, dump.python_graph)) Assign Call call:gradient_tensor Assign Call call:parse_node_or_tensor_name Try Return return:yes ExceptHandler Return return:yes" - }, - { - "library": "seaborn", - "name": "label", - "source_code": "def label(self, formatter: Formatter | None = None, *, like: str | Callable | None = None, base: int | None | Default = default, unit: str | None = None) -> Continuous: if formatter is not None and (not isinstance(formatter, Formatter)): raise TypeError(f'Label formatter must be an instance of {Formatter!r}, not {type(formatter)!r}') if like is not None and (not (isinstance(like, str) or callable(like))): msg = f'`like` must be a string or callable, not {type(like).__name__}.' raise TypeError(msg) new = copy(self) new._label_params = {'formatter': formatter, 'like': like, 'base': base, 'unit': unit} return new", - "docstring": "Configure the appearance of tick labels for the scale's axis or legend. Parameters ---------- formatter : :class: subclass Pre-configured formatter to use; other parameters will be ignored. like : str or callable Either a format pattern (e.g., ), a format string with fields named and/or (e.g., ), or a callable with a signature like . In the latter variants, is passed as the tick value and is passed as the tick index. base : number Use log formatter (with scientific notation) having this value as the base. Set to to override the default formatter with a log transform. unit : str or (str, str) tuple Use SI prefixes with these units (e.g., with , a tick value of 5000 will appear as ). When a tuple, the first element gives the separator between the number and unit. 
Returns ------- scale Copy of self with new label configuration.", - "type": "method", - "file_path": "seaborn\\seaborn\\_core\\scales.py", - "ast_data": "FunctionDef name:label arguments arg:self arg:formatter type:Formatter | None If BoolOp Compare op:IsNot Raise raises:TypeError(f'Label formatter must be an instance of {Formatter!r}, not {type(formatter)!r}') If BoolOp Compare op:IsNot Assign Raise raises:TypeError(msg) Assign Call call:copy Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "get_node_context", - "source_code": "def get_node_context(node, num_nodes = 2) -> str: node_contexts = [] cur = node for _ in range(num_nodes): node_contexts.append(cur.format_node()) if cur.op = = 'root': break cur = cur.prev return '\\n'.join(node_contexts[: : -1])", - "docstring": "Returns a string of the last num_nodes nodes in the graph.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\_utils.py", - "ast_data": "FunctionDef name:get_node_context arguments arg:node arg:num_nodes Assign Assign For Call call:range If Compare op:Eq Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "local_shards", - "source_code": "def local_shards(self) -> list[torch.Tensor]: return self._local_shards", - "docstring": "Returns a list of :class:`torch.Tensor' corresponding to the local shards for this rank. Returns an empty list if the current rank does not host any shards for this Tensor.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\tensor\\_shards_wrapper.py", - "ast_data": "FunctionDef name:local_shards arguments arg:self Return return:yes" - }, - { - "library": "sphinx", - "name": "build_epub", - "source_code": "def build_epub(self) -> None: outname = self.config.epub_basename + '.epub' logger.info(__('writing %s file...'), outname) epub_filename = self.outdir / outname with ZipFile(epub_filename, 'w', ZIP_DEFLATED) as epub: epub.write(self.outdir / 'mimetype', 'mimetype', ZIP_STORED) for filename in ('META-INF/container.xml', 'content.opf', 'toc.ncx'): epub.write(self.outdir / filename, filename, ZIP_DEFLATED) for filename in self.files: epub.write(self.outdir / filename, filename, ZIP_DEFLATED)", - "docstring": "Write the epub file. It is a zip file with the mimetype file stored uncompressed as the first entry.", - "type": "method", - "file_path": "sphinx\\sphinx\\builders\\_epub_base.py", - "ast_data": "FunctionDef name:build_epub arguments arg:self Assign Assign With For For" - }, - { - "library": "scipy", - "name": "__repr__", - "source_code": "def __repr__(self): return f'{self.__class__.__name__}(\\n{repr(self.num)}, \\n{repr(self.den)}, \\ndt: {repr(self.dt)}\\n)'", - "docstring": "Return representation of the system's transfer function", - "type": "method", - "file_path": "scipy\\scipy\\signal\\_ltisys.py", - "ast_data": "FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "set_3d_properties", - "source_code": "def set_3d_properties(self, zs = 0, zdir = 'z', axlim_clip = False): xs = self.get_xdata() ys = self.get_ydata() zs = cbook._to_unmasked_float_array(zs).ravel() zs = np.broadcast_to(zs, len(xs)) self._verts3d = juggle_axes(xs, ys, zs, zdir) self._axlim_clip = axlim_clip self.stale = True", - "docstring": "Set the *z* position and direction of the line. Parameters ---------- zs : float or array of floats The location along the *zdir* axis in 3D space to position the line. zdir : {'x', 'y', 'z'} Plane to plot line orthogonal to. Default: 'z'. 
See for a description of the values. axlim_clip : bool, default: False Whether to hide lines with an endpoint outside the axes view limits. .. versionadded:: 3.10", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\mplot3d\\art3d.py", - "ast_data": "FunctionDef name:set_3d_properties arguments arg:self arg:zs arg:zdir arg:axlim_clip Assign Call call:get_xdata Assign Call call:get_ydata Assign Call call:ravel Assign Call call:broadcast_to Assign Call call:juggle_axes Assign Assign" - }, - { - "library": "django", - "name": "kml", - "source_code": "@property def kml(self): if self.hasz: substr = '%s, %s, %s ' else: substr = '%s, %s, 0 ' return '%s' % ''.join((substr % self[i] for i in range(len(self)))).strip()", - "docstring": "Return the KML representation for the coordinates.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\geos\\coordseq.py", - "ast_data": "FunctionDef name:kml arguments arg:self If Assign Assign Return return:yes" - }, - { - "library": "django", - "name": "O", - "source_code": "def O(self): if self.timezone is None: return '' offset = self.timezone.utcoffset(self.data) seconds = offset.days * 86400 + offset.seconds sign = '-' if seconds < 0 else '+' seconds = abs(seconds) return '%s%02d%02d' % (sign, seconds // 3600, seconds // 60 % 60)", - "docstring": "Difference to Greenwich time in hours; e.g. '+0200', '-0430'. If timezone information is not available, return an empty string.", - "type": "method", - "file_path": "django\\django\\utils\\dateformat.py", - "ast_data": "FunctionDef name:O arguments arg:self If Compare op:Is Return return:yes Assign Call call:utcoffset Assign Assign Assign Call call:abs Return return:yes" - }, - { - "library": "pytorch", - "name": "tensor_inference_rule", - "source_code": "@register_inference_rule(torch.tensor) def tensor_inference_rule(n: Node, symbols, constraints, counter): return ([], counter)", - "docstring": "If the tensor is a scalar, we will skip it since we do not support scalars yet. We will add support in the future if it's needed. For our examples so far, scalars are not needed.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\migrate_gradual_types\\constraint_generator.py", - "ast_data": "FunctionDef name:tensor_inference_rule arguments arg:n type:Node arg:symbols arg:constraints arg:counter Call call:register_inference_rule Return return:yes" - }, - { - "library": "tensorflow", - "name": "alias_inplace_sub", - "source_code": "@deprecation.deprecated(None, 'Prefer tf.tensor_scatter_nd_sub, which offers the same functionality with well-defined read-write semantics.') def alias_inplace_sub(x, i, v): return _inplace_helper(x, i, v, gen_array_ops.inplace_sub)", - "docstring": "Applies an inplace sub on input x at index i with value v. Aliases x. If i is None, x and v must be the same shape. Computes x -= v; If i is a scalar, x has a rank 1 higher than v's. Computes x[i, :] -= v; Otherwise, x and v must have the same rank. Computes x[i, :] -= v; Args: x: A Tensor. i: None, a scalar or a vector. v: A Tensor. 
Returns: Returns x.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\inplace_ops.py", - "ast_data": "FunctionDef name:alias_inplace_sub arguments arg:x arg:i arg:v Call call:deprecated Return return:yes" - }, - { - "library": "tensorflow", - "name": "metrics", - "source_code": "@property def metrics(self): metrics = [] if self._is_compiled: if not hasattr(self, '_v1_compile_was_called'): return super(Model, self).metrics metrics + = self._compile_metric_functions metrics.extend(self._metrics) metrics.extend(_get_metrics_from_layers(list(self._flatten_layers(include_self = False, recursive = False)))) return metrics", - "docstring": "Returns the model's metrics added using , APIs.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_v1.py", - "ast_data": "FunctionDef name:metrics arguments arg:self Assign If If Return return:yes Return return:yes" - }, - { - "library": "scikit-learn", - "name": "__call__", - "source_code": "def __call__(self, X, Y = None, eval_gradient = False): if eval_gradient: K1, K1_gradient = self.k1(X, Y, eval_gradient = True) K2, K2_gradient = self.k2(X, Y, eval_gradient = True) return (K1 * K2, np.dstack((K1_gradient * K2[:, :, np.newaxis], K2_gradient * K1[:, :, np.newaxis]))) else: return self.k1(X, Y) * self.k2(X, Y)", - "docstring": "Return the kernel k(X, Y) and optionally its gradient. Parameters ---------- X : array-like of shape (n_samples_X, n_features) or list of object Left argument of the returned kernel k(X, Y) Y : array-like of shape (n_samples_Y, n_features) or list of object, default=None Right argument of the returned kernel k(X, Y). If None, k(X, X) is evaluated instead. eval_gradient : bool, default=False Determines whether the gradient with respect to the log of the kernel hyperparameter is computed. Returns ------- K : ndarray of shape (n_samples_X, n_samples_Y) Kernel k(X, Y) K_gradient : ndarray of shape (n_samples_X, n_samples_X, n_dims), optional The gradient of the kernel k(X, X) with respect to the log of the hyperparameter of the kernel. Only returned when is True.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\gaussian_process\\kernels.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:X arg:Y arg:eval_gradient If Assign Call call:k1 Assign Call call:k2 Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "check_steps_argument", - "source_code": "def check_steps_argument(input_data, steps, steps_name): is_x_iterator = isinstance(input_data, (iterator_ops.Iterator, iterator_ops.IteratorBase)) if input_data is None or is_x_iterator or has_symbolic_tensors(input_data) or (isinstance(input_data, list) and (not input_data)): if steps is None: input_type_str = 'a Dataset iterator' if is_x_iterator else 'data tensors' raise ValueError('When using {input_type} as input to a model, you should specify the `{steps_name}` argument.'.format(input_type = input_type_str, steps_name = steps_name)) return True if isinstance(input_data, (data_types.DatasetV1, data_types.DatasetV2)): return True if steps is not None: list_types = (np.ndarray, list, tuple) if isinstance(input_data, list_types) or (isinstance(input_data, dict) and any((isinstance(v, list_types) for v in input_data.values()))): logging.warning('When passing input data as arrays, do not specify `steps_per_epoch`/`steps` argument. Please use `batch_size` instead.') return False", - "docstring": "Validates argument based on input data's type. 
The cases when value must be provided are when 1. input data passed is an iterator. 2. model was built on top of symbolic tensors, input data is not required and is . 3. input data passed is a symbolic tensor. Args: input_data: Input data. Can be Numpy array(s) or TensorFlow tensor(s) or tf.data.Dataset iterator or . steps: Integer or . Total number of steps (batches of samples) to execute. steps_name: The public API's parameter name for . Returns: boolean, True if argument is required, else False. Raises: ValueError: if argument is required for given input data type but not provided.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\training_utils_v1.py", - "ast_data": "FunctionDef name:check_steps_argument arguments arg:input_data arg:steps arg:steps_name Assign Call call:isinstance If BoolOp Compare op:Is Call call:has_symbolic_tensors BoolOp Call call:isinstance If Compare op:Is Assign Raise raises:ValueError('When using {input_type} as input to a model, you should specify the `{steps_name}` argument.'.format(input_type=input_type_str, steps_name=steps_name)) Return return:yes If Call call:isinstance Return return:yes If Compare op:IsNot Assign If BoolOp Call call:isinstance BoolOp Call call:isinstance Call call:any Return return:yes" - }, - { - "library": "scipy", - "name": "interp2d", - "source_code": "class interp2d: def __init__(self, x, y, z, kind = 'linear', copy = True, bounds_error = False, fill_value = None): raise NotImplementedError(err_mesg)", - "docstring": "interp2d(x, y, z, kind='linear', copy=True, bounds_error=False, fill_value=None) Class for 2D interpolation (deprecated and removed) .. versionremoved:: 1.14.0 has been removed in SciPy 1.14.0. For legacy code, nearly bug-for-bug compatible replacements are on regular grids, and / for scattered 2D data. In new code, for regular grids use instead. For scattered data, prefer or . For more details see :ref:.", - "type": "class", - "file_path": "scipy\\scipy\\interpolate\\_interpolate.py", - "ast_data": "ClassDef name:interp2d FunctionDef name:__init__ arguments arg:self arg:x arg:y arg:z arg:kind arg:copy arg:bounds_error arg:fill_value Raise raises:NotImplementedError(err_mesg)" - }, - { - "library": "sphinx", - "name": "nested_parse_to_nodes", - "source_code": "def nested_parse_to_nodes(state: RSTState, text: str | StringList, *, source: str = '', offset: int = 0, allow_section_headings: bool = True, keep_title_context: bool = False) -> list[Node]: document = state.document content = _text_to_string_list(text, source = source, tab_width = document.settings.tab_width) node = Element() node.document = document if keep_title_context: state.nested_parse(content, offset, node, match_titles = allow_section_headings) else: with _fresh_title_style_context(state): state.nested_parse(content, offset, node, match_titles = allow_section_headings) return node.children", - "docstring": "Parse *text* into nodes. :param state: The state machine state. Must be a subclass of `` nodes. :param keep_title_context: If this is False (the default), then *content* is parsed as if it were an independent document, meaning that title decorations (e.g. underlines) do not need to match the surrounding document. This is useful when the parsed content comes from a completely different context, such as docstrings. If this is True, then title underlines must match those in the surrounding document, otherwise the behaviour is undefined. .. 
versionadded:: 7.4", - "type": "function", - "file_path": "sphinx\\sphinx\\util\\parsing.py", - "ast_data": "FunctionDef name:nested_parse_to_nodes arguments arg:state type:RSTState arg:text type:str | StringList Assign Assign Call call:_text_to_string_list Assign Call call:Element Assign If With Return return:yes" - }, - { - "library": "pytorch", - "name": "set_overwrite_module_params_on_conversion", - "source_code": "def set_overwrite_module_params_on_conversion(value: bool) -> None: global _overwrite_module_params_on_conversion _overwrite_module_params_on_conversion = value", - "docstring": "Sets whether to assign new tensors to the parameters instead of changing the existing parameters in-place when converting an `nn.Module.cuda()nn.Module.float()nn.Module.tonn.Module.to_empty` Args: value (bool): Whether to assign new tensors or not.", - "type": "function", - "file_path": "pytorch\\torch\\__future__.py", - "ast_data": "FunctionDef name:set_overwrite_module_params_on_conversion arguments arg:value type:bool Assign" - }, - { - "library": "authlib", - "name": "validate_claim_types_supported", - "source_code": "def validate_claim_types_supported(self): values = self.get('claim_types_supported') if not values: return if not isinstance(values, list): raise ValueError('\"claim_types_supported\" MUST be JSON array') valid_values = {'normal', 'aggregated', 'distributed'} if not valid_values.issuperset(set(values)): raise ValueError('\"claim_types_supported\" contains invalid values')", - "docstring": "OPTIONAL. JSON array containing a list of the Claim Types that the OpenID Provider supports. These Claim Types are described in Section 5.6 of OpenID Connect Core 1.0. Values defined by this specification are normal, aggregated, and distributed. If omitted, the implementation supports only normal Claims.", - "type": "method", - "file_path": "authlib\\authlib\\oidc\\discovery\\models.py", - "ast_data": "FunctionDef name:validate_claim_types_supported arguments arg:self Assign Call call:get If Return return:no If Raise raises:ValueError('\"claim_types_supported\" MUST be JSON array') Assign If Raise raises:ValueError('\"claim_types_supported\" contains invalid values')" - }, - { - "library": "kornia", - "name": "Vflip", - "source_code": "class Vflip(Module): def forward(self, input: Tensor) -> Tensor: return vflip(input) def __repr__(self) -> str: return self.__class__.__name__", - "docstring": "Vertically flip a tensor image or a batch of tensor images. Input must be a tensor of shape (C, H, W) or a batch of tensors :math:. Args: input: input tensor. Returns: The vertically flipped image tensor. Examples: >>> vflip = Vflip() >>> input = torch.tensor([[[ ... [0., 0., 0.], ... [0., 0., 0.], ... [0., 1., 1.] ... ]]]) >>> vflip(input) tensor([[[[0., 1., 1.], [0., 0., 0.], [0., 0., 0.]]]])", - "type": "class", - "file_path": "kornia\\kornia\\geometry\\transform\\flips.py", - "ast_data": "ClassDef name:Vflip FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes FunctionDef name:__repr__ arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "to_sharded_dtensor", - "source_code": "def to_sharded_dtensor(self, tensor: torch.Tensor) -> DTensor: if tensor.shape ! 
= self.sharded_size: _raise_assert_with_print(f'Expects size {self.sharded_size} but got {tensor.shape}') return _from_local_no_grad(tensor, self._sharding_spec)", - "docstring": "Converts a local tensor representing either the sharded parameter or sharded gradient to DTensor.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\_fully_shard\\_fsdp_param.py", - "ast_data": "FunctionDef name:to_sharded_dtensor arguments arg:self arg:tensor type:torch.Tensor If Compare op:NotEq Return return:yes" - }, - { - "library": "numpy", - "name": "data", - "source_code": "@property def data(self): return self._data.value", - "docstring": "A pointer to the memory area of the array as a Python integer. This memory area may contain data that is not aligned, or not in correct byte-order. The memory area may not even be writeable. The array flags and data-type of this array should be respected when passing this attribute to arbitrary C-code to avoid trouble that can include Python crashing. User Beware! The value of this attribute is exactly the same as: ``", - "type": "method", - "file_path": "numpy\\numpy\\_core\\_internal.py", - "ast_data": "FunctionDef name:data arguments arg:self Return return:yes" - }, - { - "library": "coconut", - "name": "memoize", - "source_code": "def memoize(maxsize = None, *args, **kwargs): assert maxsize is None or isinstance(maxsize, int), maxsize if lru_cache is None: return lambda func: func else: return lru_cache(maxsize, *args, **kwargs)", - "docstring": "Decorator that memoizes a function, preventing it from being recomputed if it is called multiple times with the same arguments.", - "type": "function", - "file_path": "coconut\\coconut\\util.py", - "ast_data": "FunctionDef name:memoize arguments arg:maxsize vararg:args kwarg:kwargs If Compare op:Is Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_config", - "source_code": "@generic_utils.default def get_config(self): all_args = tf_inspect.getfullargspec(self.__init__).args config = {'name': self.name, 'trainable': self.trainable} if hasattr(self, '_batch_input_shape'): config['batch_input_shape'] = self._batch_input_shape config['dtype'] = policy.serialize(self._dtype_policy) if hasattr(self, 'dynamic'): if self.dynamic: config['dynamic'] = self.dynamic elif 'dynamic' in all_args: all_args.remove('dynamic') expected_args = config.keys() extra_args = [arg for arg in all_args if arg not in expected_args] if len(extra_args) > 1 and hasattr(self.get_config, '_is_default'): raise NotImplementedError('Layer %s has arguments in `__init__` and therefore must override `get_config`.' % self.__class__.__name__) return config", - "docstring": "Returns the config of the layer. A layer config is a Python dictionary (serializable) containing the configuration of a layer. The same layer can be reinstantiated later (without its trained weights) from this configuration. The config of a layer does not include connectivity information, nor the layer class name. These are handled by (one layer of abstraction above). Note that does not guarantee to return a fresh copy of dict every time it is called. The callers should make a copy of the returned dict if they want to modify it. 
Returns: Python dictionary.", "type": "method", "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\base_layer.py", "ast_data": "FunctionDef name:get_config arguments arg:self Assign Assign If Call call:hasattr Assign Assign Call call:serialize If Call call:hasattr If Assign If Compare op:In Assign Call call:keys Assign If BoolOp Compare op:Gt Call call:hasattr Raise raises:NotImplementedError('Layer %s has arguments in `__init__` and therefore must override `get_config`.' % self.__class__.__name__) Return return:yes" }, { "library": "pytorch", "name": "statically_known_geq", "source_code": "def statically_known_geq(self, left: Expr, right: Union[Expr, int]) -> bool: expr = left >= right return self.is_expr_static_and_true(expr)", "docstring": "Returns a bool indicating if it is sound to optimize as if left is greater than or equal to right.", "type": "method", "file_path": "pytorch\\torch\\_inductor\\sizevars.py", "ast_data": "FunctionDef name:statically_known_geq arguments arg:self arg:left type:Expr arg:right type:Union[Expr, int] Assign Compare op:GtE Return return:yes" }, { "library": "django", "name": "parse_http_date", "source_code": "def parse_http_date(date): for regex in (RFC1123_DATE, RFC850_DATE, ASCTIME_DATE): m = regex.match(date) if m is not None: break else: raise ValueError('%r is not in a valid HTTP date format' % date) try: year = int(m['year']) if year < 100: current_year = datetime.now(tz = UTC).year current_century = current_year - current_year % 100 if year - current_year % 100 > 50: year += current_century - 100 else: year += current_century month = MONTHS.index(m['mon'].lower()) + 1 day = int(m['day']) hour = int(m['hour']) min = int(m['min']) sec = int(m['sec']) result = datetime(year, month, day, hour, min, sec, tzinfo = UTC) return int(result.timestamp()) except Exception as exc: raise ValueError('%r is not a valid date' % date) from exc", "docstring": "Parse a date format as specified by HTTP RFC 9110 Section 5.6.7. The three formats allowed by the RFC are accepted, even if only the first one is still in widespread use. Return an integer expressed in seconds since the epoch, in UTC.", "type": "function", "file_path": "django\\django\\utils\\http.py", "ast_data": "FunctionDef name:parse_http_date arguments arg:date For Assign Call call:match If Compare op:IsNot Raise raises:ValueError('%r is not in a valid HTTP date format' % date) Try Assign Call call:int If Compare op:Lt Assign Assign If Compare op:Gt Assign Assign Call call:int Assign Call call:int Assign Call call:int Assign Call call:int Assign Call call:datetime Return return:yes ExceptHandler Raise raises:ValueError('%r is not a valid date' % date)" }, { "library": "tensorflow", "name": "__init__", "source_code": "def __init__(self, num_packs = 1): if num_packs < 0: raise ValueError('NCCL all-reduce requires num_packs >= 0, but {} is specified'.format(num_packs)) super(NcclAllReduce, self).__init__(all_reduce_alg = 'nccl', num_packs = num_packs)", "docstring": "Initializes the object. Args: num_packs: a non-negative integer. The number of packs to split values into. If zero, no packing will be done.
Raises: ValueError: if is negative.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\cross_device_ops.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:num_packs If Compare op:Lt Raise raises:ValueError('NCCL all-reduce requires num_packs >= 0, but {} is specified'.format(num_packs))" - }, - { - "library": "kornia", - "name": "PyrDown", - "source_code": "class PyrDown(Module): def __init__(self, border_type: str = 'reflect', align_corners: bool = False, factor: float = 2.0) -> None: super().__init__() self.border_type: str = border_type self.align_corners: bool = align_corners self.factor: float = factor def forward(self, input: Tensor) -> Tensor: return pyrdown(input, self.border_type, self.align_corners, self.factor)", - "docstring": "Blur a tensor and downsamples it. Args: border_type: the padding mode to be applied before convolving. The expected modes are: `(B, C, H, W)(B, C, H / 2, W / 2)` Examples: >>> input = torch.rand(1, 2, 4, 4) >>> output = PyrDown()(input) # 1x2x2x2", - "type": "class", - "file_path": "kornia\\kornia\\geometry\\transform\\pyramid.py", - "ast_data": "ClassDef name:PyrDown FunctionDef name:__init__ arguments arg:self arg:border_type type:str arg:align_corners type:bool arg:factor type:float FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "pandas", - "name": "standardize_mapping", - "source_code": "def standardize_mapping(into): if not inspect.isclass(into): if isinstance(into, defaultdict): return partial(defaultdict, into.default_factory) into = type(into) if not issubclass(into, abc.Mapping): raise TypeError(f'unsupported type: {into}') if into = = defaultdict: raise TypeError('to_dict() only accepts initialized defaultdicts') return into", - "docstring": "Helper function to standardize a supplied mapping. Parameters ---------- into : instance or subclass of collections.abc.Mapping Must be a class, an initialized collections.defaultdict, or an instance of a collections.abc.Mapping subclass. Returns ------- mapping : a collections.abc.Mapping subclass or other constructor a callable object that can accept an iterator to create the desired Mapping. 
See Also -------- DataFrame.to_dict Series.to_dict", - "type": "function", - "file_path": "pandas\\pandas\\core\\common.py", - "ast_data": "FunctionDef name:standardize_mapping arguments arg:into If If Call call:isinstance Return return:yes Assign Call call:type If Raise raises:TypeError(f'unsupported type: {into}') If Compare op:Eq Raise raises:TypeError('to_dict() only accepts initialized defaultdicts') Return return:yes" - }, - { - "library": "coconut", - "name": "__call__", - "source_code": "@override def __call__(self, source, *args, **kwargs): if isinstance(source, (str, bytes)): compiled = syntaxerr_memoized_parse_block(source) else: compiled = source return super(CoconutCompiler, self).__call__(compiled, *args, **kwargs)", - "docstring": "Version of __call__ that compiles Coconut code first.", - "type": "method", - "file_path": "coconut\\coconut\\icoconut\\root.py", - "ast_data": "FunctionDef name:__call__ arguments arg:self arg:source vararg:args kwarg:kwargs If Call call:isinstance Assign Call call:syntaxerr_memoized_parse_block Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "ArtistAnimation", - "source_code": "class ArtistAnimation(TimedAnimation): def __init__(self, fig, artists, *args, **kwargs): self._drawn_artists = [] self._framedata = artists super().__init__(fig, *args, **kwargs) def _init_draw(self): super()._init_draw() figs = set() for f in self.new_frame_seq(): for artist in f: artist.set_visible(False) artist.set_animated(self._blit) if artist.get_figure() not in figs: figs.add(artist.get_figure()) for fig in figs: fig.canvas.draw_idle() def _pre_draw(self, framedata, blit): if blit: self._blit_clear(self._drawn_artists) else: for artist in self._drawn_artists: artist.set_visible(False) def _draw_frame(self, artists): self._drawn_artists = artists for artist in artists: artist.set_visible(True)", - "docstring": "subclass that creates an animation by using a fixed set of objects. Before creating an instance, all plotting should have taken place and the relevant artists saved. .. note:: You must store the created Animation in a variable that lives as long as the animation should run. Otherwise, the Animation object will be garbage-collected and the animation stops. Parameters ---------- fig : The figure object used to get needed events, such as draw or resize. artists : list Each list entry is a collection of objects that are made visible on the corresponding frame. Other artists are made invisible. interval : int, default: 200 Delay between frames in milliseconds. repeat_delay : int, default: 0 The delay in milliseconds between consecutive animation runs, if *repeat* is True. repeat : bool, default: True Whether the animation repeats when the sequence of frames is completed. 
blit : bool, default: False Whether blitting is used to optimize drawing.", - "type": "class", - "file_path": "matplotlib\\lib\\matplotlib\\animation.py", - "ast_data": "ClassDef name:ArtistAnimation FunctionDef name:__init__ arguments arg:self arg:fig arg:artists vararg:args kwarg:kwargs Assign Assign FunctionDef name:_init_draw arguments arg:self Assign Call call:set For Call call:new_frame_seq For If Compare op:NotIn For FunctionDef name:_pre_draw arguments arg:self arg:framedata arg:blit If For FunctionDef name:_draw_frame arguments arg:self arg:artists Assign For" - }, - { - "library": "tensorflow", - "name": "get_gradient_components", - "source_code": "def get_gradient_components(self, value): return value", - "docstring": "Returns the components of that should be included in gradients. For a ResourceVariable, its gradient component is its handle tensor. For now, we return the ResourceVariable because the gradient infrastructure has special logic to handle ResourceVariables. We should remove the special logic and return the handle tensor. Args: value: A . Returns: itself.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\resource_variable_ops.py", - "ast_data": "FunctionDef name:get_gradient_components arguments arg:self arg:value Return return:yes" - }, - { - "library": "pytorch", - "name": "elapsed_time", - "source_code": "def elapsed_time(self, end_event): return super().elapsed_time(end_event)", - "docstring": "Return the time elapsed. Time reported in milliseconds after the event was recorded and before the end_event was recorded.", - "type": "method", - "file_path": "pytorch\\torch\\cuda\\streams.py", - "ast_data": "FunctionDef name:elapsed_time arguments arg:self arg:end_event Return return:yes" - }, - { - "library": "kornia", - "name": "dog_response", - "source_code": "def dog_response(input: Tensor) -> Tensor: KORNIA_CHECK_SHAPE(input, ['B', 'C', 'L', 'H', 'W']) return input[:, :, 1:] - input[:, :, : -1]", - "docstring": "Compute the Difference-of-Gaussian response. Args: input: a given the gaussian 5d tensor :math:. Return: the response map per channel with shape :math:.", - "type": "function", - "file_path": "kornia\\kornia\\feature\\responses.py", - "ast_data": "FunctionDef name:dog_response arguments arg:input type:Tensor Return return:yes" - }, - { - "library": "pytorch", - "name": "is_pinned", - "source_code": "def is_pinned(self, device: Union[str, torch.device] = 'cuda'): return torch.tensor([], dtype = torch.uint8, device = self.device).set_(cast(Storage, self)).is_pinned(device)", - "docstring": "Determine whether the CPU storage is already pinned on device. Args: device (str or torch.device): The device to pin memory on (default: ``). This argument is discouraged and subject to deprecated. 
Returns: A boolean variable.", - "type": "method", - "file_path": "pytorch\\torch\\storage.py", - "ast_data": "FunctionDef name:is_pinned arguments arg:self arg:device type:Union[str, torch.device] Return return:yes" - }, - { - "library": "kornia", - "name": "symmetric_transfer_error", - "source_code": "def symmetric_transfer_error(pts1: Tensor, pts2: Tensor, H: Tensor, squared: bool = True, eps: float = 1e-08) -> Tensor: KORNIA_CHECK_SHAPE(H, ['B', '3', '3']) if pts1.size(-1) = = 3: pts1 = convert_points_from_homogeneous(pts1) if pts2.size(-1) = = 3: pts2 = convert_points_from_homogeneous(pts2) max_num = torch.finfo(pts1.dtype).max H_inv, good_H = safe_inverse_with_mask(H) there: Tensor = oneway_transfer_error(pts1, pts2, H, True, eps) back: Tensor = oneway_transfer_error(pts2, pts1, H_inv, True, eps) good_H_reshape: Tensor = good_H.view(-1, 1).expand_as(there) out = (there + back) * good_H_reshape.to(there.dtype) + max_num * (~good_H_reshape).to(there.dtype) if squared: return out return (out + eps).sqrt()", - "docstring": "Return Symmetric transfer error for correspondences given the homography matrix. Args: pts1: correspondences from the left images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. pts2: correspondences from the right images with shape (B, N, 2 or 3). If they are homogeneous, converted automatically. H: Homographies with shape :math:. squared: if True (default), the squared distance is returned. eps: Small constant for safe sqrt. Returns: the computed distance with shape :math:.", - "type": "function", - "file_path": "kornia\\kornia\\geometry\\homography.py", - "ast_data": "FunctionDef name:symmetric_transfer_error arguments arg:pts1 type:Tensor arg:pts2 type:Tensor arg:H type:Tensor arg:squared type:bool arg:eps type:float If Compare op:Eq Assign Call call:convert_points_from_homogeneous If Compare op:Eq Assign Call call:convert_points_from_homogeneous Assign Assign Call call:safe_inverse_with_mask Assign If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "weights", - "source_code": "@property def weights(self): return self.variables", - "docstring": "List of weights/variables created by the Template.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\template.py", - "ast_data": "FunctionDef name:weights arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "create_multilayer", - "source_code": "@classmethod def create_multilayer(cls, device: torch.device, dst_dtype: torch.dtype, src_dtype: torch.dtype, inner_fn: Callable[..., Any], ranges: Sequence[Expr], reduction_ranges: Sequence[Expr], reduction_type: ReductionType, split: _IntLike, reduction_hint: ReductionHint, input_node: Optional[IRNode] = None) -> TensorBox: reduction_numel = sympy_product(reduction_ranges) block_size = FloorDiv(reduction_numel + (split - 1), split) default = cls.default_value(reduction_type, dst_dtype) wrapper_fn = cls._multilayer_wrap_loader(inner_fn, reduction_ranges, reduction_numel, split, block_size, default, input_node) return cls.create_multilayer_helper(device, dst_dtype, src_dtype, wrapper_fn, ranges, reduction_ranges, [*ranges, split], [block_size], reduction_type, split, reduction_hint)", - "docstring": "Break a large reduction up into multiple smaller reductions recursively", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\ir.py", - "ast_data": "FunctionDef name:create_multilayer arguments arg:cls arg:device type:torch.device arg:dst_dtype 
type:torch.dtype arg:src_dtype type:torch.dtype arg:inner_fn type:Callable[..., Any] arg:ranges type:Sequence[Expr] arg:reduction_ranges type:Sequence[Expr] arg:reduction_type type:ReductionType arg:split type:_IntLike arg:reduction_hint type:ReductionHint arg:input_node type:Optional[IRNode] Assign Call call:sympy_product Assign Call call:FloorDiv Assign Call call:default_value Assign Call call:_multilayer_wrap_loader Return return:yes" - }, - { - "library": "scipy", - "name": "bench_run_global", - "source_code": "def bench_run_global(self, numtrials = 50, methods = None): if methods is None: methods = ['DE', 'basinh.', 'DA', 'DIRECT', 'SHGO'] stochastic_methods = ['DE', 'basinh.', 'DA'] method_fun = {'DE': self.run_differentialevolution, 'basinh.': self.run_basinhopping, 'DA': self.run_dualannealing, 'DIRECT': self.run_direct, 'SHGO': self.run_shgo} for m in methods: if m in stochastic_methods: for i in range(numtrials): method_fun[m]() else: method_fun[m]()", - "docstring": "Run the optimization tests for the required minimizers.", - "type": "method", - "file_path": "scipy\\benchmarks\\benchmarks\\optimize.py", - "ast_data": "FunctionDef name:bench_run_global arguments arg:self arg:numtrials arg:methods If Compare op:Is Assign Assign Assign For If Compare op:In For Call call:range" - }, - { - "library": "pytorch", - "name": "ReplaceGetAttrWithPlaceholder", - "source_code": "class ReplaceGetAttrWithPlaceholder(_pass.Transform): _replaced_attrs: tuple[torch.Tensor, ...] | None @property def replaced_attrs(self) -> tuple[torch.Tensor, ...]: assert self._replaced_attrs is not None, 'Must run ReplaceGetAttrWithPlaceholder first' return self._replaced_attrs def _run(self, *args, **kwargs) -> torch.fx.GraphModule: graph_module = self.module graph = graph_module.graph replaced_attrs: list[torch.Tensor] = [] for node in graph.nodes: if node.op = = 'get_attr': replaced_attr: torch.Tensor | None = None try: replaced_attr = graph_module.get_parameter(node.target) except AttributeError: replaced_attr = graph_module.get_buffer(node.target) node.op = 'placeholder' node.target = node.target.replace('.', '_') node.args = (None,) replaced_attrs.append(replaced_attr) self._replaced_attrs = tuple(replaced_attrs) return graph_module", - "docstring": "Replace get_attr with placeholder. The parameters and buffers accessed by the original get_attr are returned; they are useful when creating random inputs for the modified graph_module.", - "type": "class", - "file_path": "pytorch\\torch\\onnx\\_internal\\fx\\passes\\virtualization.py", - "ast_data": "ClassDef name:ReplaceGetAttrWithPlaceholder FunctionDef name:replaced_attrs arguments arg:self Return return:yes FunctionDef name:_run arguments arg:self vararg:args kwarg:kwargs Assign Assign For If Compare op:Eq Try Assign Call call:get_parameter ExceptHandler Assign Call call:get_buffer Assign Assign Call call:replace Assign Assign Call call:tuple Return return:yes" - }, - { - "library": "pytorch", - "name": "fuse_linear_bn", - "source_code": "def fuse_linear_bn(is_qat, linear, bn): assert linear.training = = bn.training, 'Linear and BN both must be in the same mode (train or eval).' 
if is_qat: assert bn.num_features = = linear.out_features, 'Output features of Linear must match num_features of BatchNorm1d' assert bn.affine, 'Only support fusing BatchNorm1d with affine set to True' assert bn.track_running_stats, 'Only support fusing BatchNorm1d with tracking_running_stats set to True' return nni.LinearBn1d(linear, bn) else: return nn.utils.fusion.fuse_linear_bn_eval(linear, bn)", - "docstring": "Return the fused linear and bn modules. Given the linear and bn modules, fuses them and returns the fused module Args: is_qat: a flag for whether we are using quantization aware training fusion or post training quantization fusion linear: Module instance of type Linear bn: BatchNorm1d instance that needs to be fused with the linear layer Examples:: >>> m1 = nn.Linear(20, 10) >>> b1 = nn.BatchNorm1d(10) >>> # xdoctest: +SKIP >>> m2 = fuse_linear_bn(m1, b1)", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py", - "ast_data": "FunctionDef name:fuse_linear_bn arguments arg:is_qat arg:linear arg:bn If Return return:yes Return return:yes" - }, - { - "library": "numpy", - "name": "put", - "source_code": "def put(self, indices, values, mode = 'raise'): if self._hardmask and self._mask is not nomask: mask = self._mask[indices] indices = narray(indices, copy = None) values = narray(values, copy = None, subok = True) values.resize(indices.shape) indices = indices[~mask] values = values[~mask] self._data.put(indices, values, mode = mode) if self._mask is nomask and getmask(values) is nomask: return m = getmaskarray(self) if getmask(values) is nomask: m.put(indices, False, mode = mode) else: m.put(indices, values._mask, mode = mode) m = make_mask(m, copy = False, shrink = True) self._mask = m return", - "docstring": "Set storage-indexed locations to corresponding values. Sets self._data.flat[n] = values[n] for each n in indices. If is shorter than then it will repeat. If has some masked values, the initial mask is updated in consequence, else the corresponding values are unmasked. Parameters ---------- indices : 1-D array_like Target indices, interpreted as integers. values : array_like Values to place in self._data copy at target indices. mode : {'raise', 'wrap', 'clip'}, optional Specifies how out-of-bounds indices will behave. 'raise' : raise an error. 'wrap' : wrap around. 'clip' : clip to the range. Notes ----- can be a scalar or length 1 array. 
Examples -------- >>> import numpy as np >>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4) >>> x masked_array( data=[[1, --, 3], [--, 5, --], [7, --, 9]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put([0,4,8],[10,20,30]) >>> x masked_array( data=[[10, --, 3], [--, 20, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999) >>> x.put(4,999) >>> x masked_array( data=[[10, --, 3], [--, 999, --], [7, --, 30]], mask=[[False, True, False], [ True, False, True], [False, True, False]], fill_value=999999)", - "type": "method", - "file_path": "numpy\\numpy\\ma\\core.py", - "ast_data": "FunctionDef name:put arguments arg:self arg:indices arg:values arg:mode If BoolOp Compare op:IsNot Assign Assign Call call:narray Assign Call call:narray Assign Assign If BoolOp Compare op:Is Compare op:Is Return return:no Assign Call call:getmaskarray If Compare op:Is Assign Call call:make_mask Assign Return return:no" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, X, y = None): X = validate_data(self, X, accept_sparse = 'csr') random_state = check_random_state(self.random_state) n_features = X.shape[1] sparse = sp.issparse(X) if self.gamma = = 'scale': X_var = X.multiply(X).mean() - X.mean() ** 2 if sparse else X.var() self._gamma = 1.0 / (n_features * X_var) if X_var ! = 0 else 1.0 else: self._gamma = self.gamma self.random_weights_ = (2.0 * self._gamma) ** 0.5 * random_state.normal(size = (n_features, self.n_components)) self.random_offset_ = random_state.uniform(0, 2 * np.pi, size = self.n_components) if X.dtype = = np.float32: self.random_weights_ = self.random_weights_.astype(X.dtype, copy = False) self.random_offset_ = self.random_offset_.astype(X.dtype, copy = False) self._n_features_out = self.n_components return self", - "docstring": "Fit the model with X. Samples random projection according to n_features. Parameters ---------- X : {array-like, sparse matrix}, shape (n_samples, n_features) Training data, where is the number of samples and is the number of features. y : array-like, shape (n_samples,) or (n_samples, n_outputs), default=None Target values (None for unsupervised transformations). Returns ------- self : object Returns the instance itself.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\kernel_approximation.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:X arg:y Call call:_fit_context Assign Call call:validate_data Assign Call call:check_random_state Assign Assign Call call:issparse If Compare op:Eq Assign Assign Assign Assign Assign Call call:uniform If Compare op:Eq Assign Call call:astype Assign Call call:astype Assign Return return:yes" - }, - { - "library": "scikit-learn", - "name": "fit", - "source_code": "@_fit_context(prefer_skip_nested_validation = True) def fit(self, raw_documents, y = None): self._check_params() self._warn_for_unused_params() self._tfidf = TfidfTransformer(norm = self.norm, use_idf = self.use_idf, smooth_idf = self.smooth_idf, sublinear_tf = self.sublinear_tf) X = super().fit_transform(raw_documents) self._tfidf.fit(X) return self", - "docstring": "Learn vocabulary and idf from training set. Parameters ---------- raw_documents : iterable An iterable which generates either str, unicode or file objects. y : None This parameter is not needed to compute tfidf. 
Returns ------- self : object Fitted vectorizer.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\feature_extraction\\text.py", - "ast_data": "FunctionDef name:fit arguments arg:self arg:raw_documents arg:y Call call:_fit_context Assign Call call:TfidfTransformer Assign Call call:fit_transform Return return:yes" - }, - { - "library": "tensorflow", - "name": "is_namedtuple", - "source_code": "def is_namedtuple(instance, strict = False): return _pywrap_utils.IsNamedtuple(instance, strict)", - "docstring": "Returns True iff is a . Args: instance: An instance of a Python object. strict: If True, is considered to be a only if it is a \"plain\" namedtuple. For instance, a class inheriting from a will be considered to be a iff . Returns: True if is a .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\nest_util.py", - "ast_data": "FunctionDef name:is_namedtuple arguments arg:instance arg:strict Return return:yes" - }, - { - "library": "virtualenv", - "name": "creator", - "source_code": "@property def creator(self): return self._creator", - "docstring": "The creator used to build the virtual environment (must be compatible with the interpreter).", - "type": "method", - "file_path": "virtualenv\\src\\virtualenv\\run\\session.py", - "ast_data": "FunctionDef name:creator arguments arg:self Return return:yes" - }, - { - "library": "pytorch", - "name": "restride_A_for_fused_matmul_reduce_scatter", - "source_code": "def restride_A_for_fused_matmul_reduce_scatter(t: torch.Tensor, scatter_dim: int) -> torch.Tensor: perm = list(range(len(t.shape))) perm.insert(0, perm.pop(scatter_dim)) return make_contiguous_for_perm(t, perm)", - "docstring": "Restride the arg of for optimal perf. See the doc for for detail.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_symmetric_memory\\__init__.py", - "ast_data": "FunctionDef name:restride_A_for_fused_matmul_reduce_scatter arguments arg:t type:torch.Tensor arg:scatter_dim type:int Assign Call call:list Return return:yes" - }, - { - "library": "flexx", - "name": "export", - "source_code": "def export(cls, filename, properties = None, **kwargs): if properties is not None: raise RuntimeError('export(... properties) is deprecated, use app.App(...).export() instead.') assert isinstance(cls, type) and issubclass(cls, (PyComponent, JsComponent)) a = App(cls) return a.export(filename, **kwargs)", - "docstring": "Shorthand for ``.", - "type": "function", - "file_path": "flexx\\flexx\\app\\_funcs.py", - "ast_data": "FunctionDef name:export arguments arg:cls arg:filename arg:properties kwarg:kwargs If Compare op:IsNot Raise raises:RuntimeError('export(... 
properties) is deprecated, use app.App(...).export() instead.') Assign Call call:App Return return:yes" - }, - { - "library": "pytorch", - "name": "get_buffer", - "source_code": "def get_buffer(self, target: str) -> 'Tensor': module_path, _, buffer_name = target.rpartition('.') mod: torch.nn.Module = self.get_submodule(module_path) if not hasattr(mod, buffer_name): raise AttributeError(mod._get_name() + ' has no attribute `' + buffer_name + '`') buffer: torch.Tensor = getattr(mod, buffer_name) if buffer_name not in mod._buffers: raise AttributeError('`' + buffer_name + '` is not a buffer') return buffer", - "docstring": "Return the buffer given by ``target``. Raises: AttributeError: If the target string references an invalid path or resolves to something that is not a buffer", - "type": "method", - "file_path": "pytorch\\torch\\nn\\modules\\module.py", - "ast_data": "FunctionDef name:get_buffer arguments arg:self arg:target type:str Assign Call call:rpartition If Raise raises:AttributeError(mod._get_name() + ' has no attribute `' + buffer_name + '`') If Compare op:NotIn Raise raises:AttributeError('`' + buffer_name + '` is not a buffer') Return return:yes" - }, - { - "library": "mongo", - "name": "unpack_response", - "source_code": "def unpack_response(self, cursor_id: Optional[int] = None, codec_options: CodecOptions = _UNICODE_REPLACE_CODEC_OPTIONS, user_fields: Optional[Mapping[str, Any]] = None, legacy_response: bool = False) -> list[dict[str, Any]]: assert not legacy_response return bson._decode_all_selective(self.payload_document, codec_options, user_fields)", - "docstring": "Unpack a OP_MSG command response. :param cursor_id: Ignored, for compatibility with _OpReply. :param codec_options: an instance of :class:`CodecOptions` :param user_fields: Response fields that should be decoded using the TypeDecoders from codec_options, passed to bson._decode_all_selective.", - "type": "method", - "file_path": "mongo\\pymongo\\message.py", - "ast_data": "FunctionDef name:unpack_response arguments arg:self arg:cursor_id type:Optional[int] arg:codec_options type:CodecOptions arg:user_fields type:Optional[Mapping[str, Any]] arg:legacy_response type:bool Return return:yes" - }, - { - "library": "pandas", - "name": "select_columns_by_name", - "source_code": "@abstractmethod def select_columns_by_name(self, names: Sequence[str]) -> DataFrame: pass", - "docstring": "Create a new DataFrame by selecting a subset of columns by name.", - "type": "method", - "file_path": "pandas\\pandas\\core\\interchange\\dataframe_protocol.py", - "ast_data": "FunctionDef name:select_columns_by_name arguments arg:self arg:names type:Sequence[str]" - }, - { - "library": "tensorflow", - "name": "get_json_type", - "source_code": "def get_json_type(obj): if hasattr(obj, 'get_config'): return {'class_name': obj.__class__.__name__, 'config': obj.get_config()} if type(obj).__module__ == np.__name__: if isinstance(obj, np.ndarray): return obj.tolist() else: return obj.item() if callable(obj): return obj.__name__ if type(obj).__name__ == type.__name__: return obj.__name__ if isinstance(obj, tensor_shape.Dimension): return obj.value if isinstance(obj, tensor_shape.TensorShape): return obj.as_list() if isinstance(obj, dtypes.DType): return obj.name if isinstance(obj, collections_abc.Mapping): return dict(obj) if obj is Ellipsis: return {'class_name': '__ellipsis__'} if isinstance(obj, wrapt.ObjectProxy): return obj.__wrapped__ raise TypeError(f'Object {obj} is not JSON-serializable. You may implement a `get_config()` method on the class (returning a JSON-serializable dictionary) to make it serializable.')", - "docstring": "Serializes any object to a JSON-serializable structure. Args: obj: the object to serialize Returns: JSON-serializable structure representing obj. Raises: TypeError: if obj cannot be serialized.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\util\\serialization.py", - "ast_data": "FunctionDef name:get_json_type arguments arg:obj If Call call:hasattr Return return:yes If Compare op:Eq If Call call:isinstance Return return:yes Return return:yes If Call call:callable Return return:yes If Compare op:Eq Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Call call:isinstance Return return:yes If Compare op:Is Return return:yes If Call call:isinstance Return return:yes Raise raises:TypeError(f'Object {obj} is not JSON-serializable. You may implement a `get_config()` method on the class (returning a JSON-serializable dictionary) to make it serializable.')" - }, - { - "library": "tensorflow", - "name": "get_config", - "source_code": "def get_config(self): return {'reduction': self.reduction, 'name': self.name}", - "docstring": "Returns the config dictionary for a instance.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\losses.py", - "ast_data": "FunctionDef name:get_config arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "time_count_neighbors", - "source_code": "def time_count_neighbors(self, mn1n2, p, probe_radius, boxsize, leafsize, cls): if cls != 'cKDTree_weighted': self.T1.count_neighbors(self.T2, probe_radius, p = p) else: self.T1.count_neighbors(self.T2, probe_radius, weights = (self.w1, self.w2), p = p)", - "docstring": "Count neighbors kd-tree dim | # points T1 | # points T2 | p | probe radius | BoxSize | LeafSize | cls", - "type": "method", - "file_path": "scipy\\benchmarks\\benchmarks\\spatial.py", - "ast_data": "FunctionDef name:time_count_neighbors arguments arg:self arg:mn1n2 arg:p arg:probe_radius arg:boxsize arg:leafsize arg:cls If Compare op:NotEq" - }, - { - "library": "django", - "name": "transform", - "source_code": "def transform(self, coord_trans, clone = False): if clone: klone = self.clone() klone.transform(coord_trans) return klone if isinstance(coord_trans, CoordTransform): capi.geom_transform(self.ptr, coord_trans.ptr) elif isinstance(coord_trans, SpatialReference): capi.geom_transform_to(self.ptr, coord_trans.ptr) elif isinstance(coord_trans, (int, str)): sr = SpatialReference(coord_trans) capi.geom_transform_to(self.ptr, sr.ptr) else: raise TypeError('Transform only accepts CoordTransform, SpatialReference, string, and integer objects.')", - "docstring": "Transform this geometry to a different spatial reference system. May take a CoordTransform object, a SpatialReference object, string WKT or PROJ, and/or an integer SRID. By default, return nothing and transform the geometry in-place. 
However, if the keyword is set, return a transformed clone of this geometry.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\gdal\\geometries.py", - "ast_data": "FunctionDef name:transform arguments arg:self arg:coord_trans arg:clone If Assign Call call:clone Return return:yes If Call call:isinstance If Call call:isinstance If Call call:isinstance Assign Call call:SpatialReference Raise raises:TypeError('Transform only accepts CoordTransform, SpatialReference, string, and integer objects.')" - }, - { - "library": "mongo", - "name": "publish_connection_checked_out", - "source_code": "def publish_connection_checked_out(self, address: _Address, connection_id: int, duration: float) -> None: event = ConnectionCheckedOutEvent(address, connection_id, duration) for subscriber in self.__cmap_listeners: try: subscriber.connection_checked_out(event) except Exception: _handle_exception()", - "docstring": "Publish a :class: to all connection listeners.", - "type": "method", - "file_path": "mongo\\pymongo\\monitoring.py", - "ast_data": "FunctionDef name:publish_connection_checked_out arguments arg:self arg:address type:_Address arg:connection_id type:int arg:duration type:float Assign Call call:ConnectionCheckedOutEvent For Try ExceptHandler" - }, - { - "library": "tensorflow", - "name": "do_not_convert", - "source_code": "@tf_export('autograph.experimental.do_not_convert') def do_not_convert(func = None): if func is None: return do_not_convert def wrapper(*args, **kwargs): with ag_ctx.ControlStatusCtx(status = ag_ctx.Status.DISABLED): return func(*args, **kwargs) if inspect.isfunction(func) or inspect.ismethod(func): wrapper = functools.update_wrapper(wrapper, func) return autograph_artifact(wrapper)", - "docstring": "Decorator that suppresses the conversion of a function. Args: func: function to decorate. Returns: If is not None, returns a which is equivalent to , but is not converted by AutoGraph. If is None, returns a decorator that, when invoked with a single argument, returns a equivalent to the above case.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\impl\\api.py", - "ast_data": "FunctionDef name:do_not_convert arguments arg:func Call call:tf_export If Compare op:Is Return return:yes FunctionDef name:wrapper arguments vararg:args kwarg:kwargs With Return return:yes If BoolOp Call call:isfunction Call call:ismethod Assign Call call:update_wrapper Return return:yes" - }, - { - "library": "authlib", - "name": "save_device_credential", - "source_code": "def save_device_credential(self, client_id, scope, data): raise NotImplementedError()", - "docstring": "Save device token into database for later use. 
Developers MUST implement this method in subclass:: def save_device_credential(self, client_id, scope, data): item = DeviceCredential(client_id=client_id, scope=scope, **data) item.save()", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc8628\\endpoint.py", - "ast_data": "FunctionDef name:save_device_credential arguments arg:self arg:client_id arg:scope arg:data Raise raises:NotImplementedError()" - }, - { - "library": "algorithms", - "name": "copy_random_pointer_v2", - "source_code": "def copy_random_pointer_v2(head): copy = defaultdict(lambda: RandomListNode(0)) copy[None] = None node = head while node: copy[node].label = node.label copy[node].next = copy[node.next] copy[node].random = copy[node.random] node = node.next return copy[head]", - "docstring": ":type head: RandomListNode :rtype: RandomListNode", - "type": "function", - "file_path": "algorithms\\algorithms\\linkedlist\\copy_random_pointer.py", - "ast_data": "FunctionDef name:copy_random_pointer_v2 arguments arg:head Assign Call call:defaultdict Assign Assign While Assign Assign Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, initializer, default_value, name = None, experimental_is_anonymous = False): self._initializer = initializer self._default_value = default_value self._is_anonymous = experimental_is_anonymous if not self._is_anonymous: self._shared_name = self._initializer._shared_name if not self._shared_name: self._shared_name = 'hash_table_%s' % (str(uuid.uuid4()),) self._name = name or 'hash_table' self._table_name = None super(StaticHashTable, self).__init__(default_value, initializer) self._value_shape = self._default_value.get_shape()", - "docstring": "Creates a non-initialized object. Creates a table, the type of its keys and values are specified by the initializer. Before using the table you will have to initialize it. After initialization the table will be immutable. Args: initializer: The table initializer to use. See kernel for supported key and value types. default_value: The value to use if a key is missing in the table. name: A name for the operation (optional). experimental_is_anonymous: Whether to use anonymous mode for the table (default is False). In anonymous mode, the table resource can only be accessed via a resource handle. It can't be looked up by a name. When all resource handles pointing to that resource are gone, the resource will be deleted automatically. 
Returns: A object.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:initializer arg:default_value arg:name arg:experimental_is_anonymous Assign Assign Assign If Assign If Assign Assign BoolOp Assign Assign Call call:get_shape" - }, - { - "library": "django", - "name": "sql_with_params", - "source_code": "def sql_with_params(self): return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()", - "docstring": "Return the query as an SQL string and the parameters that will be substituted into the query.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\query.py", - "ast_data": "FunctionDef name:sql_with_params arguments arg:self Return return:yes" - }, - { - "library": "pandas", - "name": "validate_multiindex", - "source_code": "def validate_multiindex(self, obj: DataFrame | Series) -> tuple[DataFrame, list[Hashable]]: levels = com.fill_missing_names(obj.index.names) try: reset_obj = obj.reset_index() except ValueError as err: raise ValueError('duplicate names/columns in the multi-index when storing as a table') from err assert isinstance(reset_obj, DataFrame) return (reset_obj, levels)", - "docstring": "validate that we can store the multi-index; reset and return the new object", - "type": "method", - "file_path": "pandas\\pandas\\io\\pytables.py", - "ast_data": "FunctionDef name:validate_multiindex arguments arg:self arg:obj type:DataFrame | Series Assign Call call:fill_missing_names Try Assign Call call:reset_index ExceptHandler Raise raises:ValueError('duplicate names/columns in the multi-index when storing as a table') Return return:yes" - }, - { - "library": "matplotlib", - "name": "fill", - "source_code": "def fill(self, *args): if len(args): _fillcolor = args[0] else: _fillcolor = self._fillcolor return self._hatch or (_fillcolor is not None and (len(_fillcolor) < = 3 or _fillcolor[3] ! = 0.0))", - "docstring": "Predicate: does the path need to be filled? An optional argument can be used to specify an alternative _fillcolor, as needed by RendererPdf.draw_markers.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backends\\backend_pdf.py", - "ast_data": "FunctionDef name:fill arguments arg:self vararg:args If Call call:len Assign Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "insert_type_promotion_nodes", - "source_code": "def insert_type_promotion_nodes(graph_module: torch.fx.GraphModule) -> None: for module in graph_module.modules(): assert isinstance(module, torch.fx.GraphModule) passes.InsertTypePromotion(module).run()", - "docstring": "Inplace pass to insert explicit type promotion nodes, recursively through nested modules.", - "type": "function", - "file_path": "pytorch\\torch\\onnx\\_internal\\exporter\\_fx_passes.py", - "ast_data": "FunctionDef name:insert_type_promotion_nodes arguments arg:graph_module type:torch.fx.GraphModule For Call call:modules" - }, - { - "library": "scipy", - "name": "mean", - "source_code": "def mean(input, labels = None, index = None): count, sum = _stats(input, labels, index) return sum / np.asanyarray(count).astype(np.float64)", - "docstring": "Calculate the mean of the values of an array at labels. Parameters ---------- input : array_like Array on which to compute the mean of elements over distinct regions. labels : array_like, optional Array of labels of same shape, or broadcastable to the same shape as . 
All elements sharing the same label form one region over which the mean of the elements is computed. index : int or sequence of ints, optional Labels of the objects over which the mean is to be computed. Default is None, in which case the mean for all values where label is greater than 0 is calculated. Returns ------- out : list Sequence of same length as , with the mean of the different regions labeled by the labels in . See Also -------- variance, standard_deviation, minimum, maximum, sum, label Examples -------- >>> from scipy import ndimage >>> import numpy as np >>> a = np.arange(25).reshape((5,5)) >>> labels = np.zeros_like(a) >>> labels[3:5,3:5] = 1 >>> index = np.unique(labels) >>> labels array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 1, 1], [0, 0, 0, 1, 1]]) >>> index array([0, 1]) >>> ndimage.mean(a, labels=labels, index=index) [10.285714285714286, 21.0]", - "type": "function", - "file_path": "scipy\\scipy\\ndimage\\_measurements.py", - "ast_data": "FunctionDef name:mean arguments arg:input arg:labels arg:index Assign Call call:_stats Return return:yes" - }, - { - "library": "authlib", - "name": "generate", - "source_code": "def generate(self, grant_type, client, user = None, scope = None, expires_in = None, include_refresh_token = True): scope = self.get_allowed_scope(client, scope) access_token = self.access_token_generator(client = client, grant_type = grant_type, user = user, scope = scope) if expires_in is None: expires_in = self._get_expires_in(client, grant_type) token = {'token_type': 'Bearer', 'access_token': access_token} if expires_in: token['expires_in'] = expires_in if include_refresh_token and self.refresh_token_generator: token['refresh_token'] = self.refresh_token_generator(client = client, grant_type = grant_type, user = user, scope = scope) if scope: token['scope'] = scope return token", - "docstring": "Generate a bearer token for OAuth 2.0 authorization token endpoint. :param client: the client that making the request. :param grant_type: current requested grant_type. :param user: current authorized user. :param expires_in: if provided, use this value as expires_in. :param scope: current requested scope. :param include_refresh_token: should refresh_token be included. :return: Token dict", - "type": "method", - "file_path": "authlib\\authlib\\oauth2\\rfc6750\\token.py", - "ast_data": "FunctionDef name:generate arguments arg:self arg:grant_type arg:client arg:user arg:scope arg:expires_in arg:include_refresh_token Assign Call call:get_allowed_scope Assign Call call:access_token_generator If Compare op:Is Assign Call call:_get_expires_in Assign If Assign If BoolOp Assign Call call:refresh_token_generator If Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "state_dict", - "source_code": "def state_dict(self) -> dict[str, Any]: data_groups = self._get_serializable_data_groups() state = self._convert_mask(self.state) return {'state': state, 'data_groups': data_groups, 'defaults': self.defaults}", - "docstring": "Returns the state of the sparsifier as a :class:. It contains: * state - contains name -> mask mapping. 
* data_groups - a dictionary containing all config information for each layer * defaults - the default config while creating the constructor", - "type": "method", - "file_path": "pytorch\\torch\\ao\\pruning\\_experimental\\activation_sparsifier\\activation_sparsifier.py", - "ast_data": "FunctionDef name:state_dict arguments arg:self Assign Call call:_get_serializable_data_groups Assign Call call:_convert_mask Return return:yes" - }, - { - "library": "mongo", - "name": "select_server", - "source_code": "async def select_server(self, selector: Callable[[Selection], Selection], operation: str, server_selection_timeout: Optional[float] = None, address: Optional[_Address] = None, deprioritized_servers: Optional[list[Server]] = None, operation_id: Optional[int] = None) -> Server: server = await self._select_server(selector, operation, server_selection_timeout, address, deprioritized_servers, operation_id = operation_id) if _csot.get_timeout(): _csot.set_rtt(server.description.min_round_trip_time) if _SERVER_SELECTION_LOGGER.isEnabledFor(logging.DEBUG): _debug_log(_SERVER_SELECTION_LOGGER, message = _ServerSelectionStatusMessage.SUCCEEDED, selector = selector, operation = operation, operationId = operation_id, topologyDescription = self.description, clientId = self.description._topology_settings._topology_id, serverHost = server.description.address[0], serverPort = server.description.address[1]) return server", - "docstring": "Like select_servers, but choose a random server if several match.", - "type": "method", - "file_path": "mongo\\pymongo\\asynchronous\\topology.py", - "ast_data": "AsyncFunctionDef name:select_server arguments arg:self arg:selector type:Callable[[Selection], Selection] arg:operation type:str arg:server_selection_timeout type:Optional[float] arg:address type:Optional[_Address] arg:deprioritized_servers type:Optional[list[Server]] arg:operation_id type:Optional[int] Assign If Call call:get_timeout If Call call:isEnabledFor Return return:yes" - }, - { - "library": "scipy", - "name": "kurtosis", - "source_code": "def kurtosis(a, axis = 0, fisher = True, bias = True): a, axis = _chk_asarray(a, axis) mean = a.mean(axis, keepdims = True) m2 = _moment(a, 2, axis, mean = mean) m4 = _moment(a, 4, axis, mean = mean) zero = m2 < = (np.finfo(m2.dtype).resolution * mean.squeeze(axis)) ** 2 with np.errstate(all = 'ignore'): vals = ma.where(zero, 0, m4 / m2 ** 2.0) if not bias and zero is not ma.masked and (m2 is not ma.masked): n = a.count(axis) can_correct = ~zero & (n > 3) if can_correct.any(): n = np.extract(can_correct, n) m2 = np.extract(can_correct, m2) m4 = np.extract(can_correct, m4) nval = 1.0 / (n - 2) / (n - 3) * ((n * n - 1.0) * m4 / m2 ** 2.0 - 3 * (n - 1) ** 2.0) np.place(vals, can_correct, nval + 3.0) if fisher: return vals - 3 else: return vals", - "docstring": "Computes the kurtosis (Fisher or Pearson) of a dataset. Kurtosis is the fourth central moment divided by the square of the variance. If Fisher's definition is used, then 3.0 is subtracted from the result to give 0.0 for a normal distribution. If bias is False then the kurtosis is calculated using k statistics to eliminate bias coming from biased moment estimators Use to see if result is close enough to normal. Parameters ---------- a : array data for which the kurtosis is calculated axis : int or None, optional Axis along which the kurtosis is calculated. Default is 0. If None, compute over the whole array . fisher : bool, optional If True, Fisher's definition is used (normal ==> 0.0). 
If False, Pearson's definition is used (normal ==> 3.0). bias : bool, optional If False, then the calculations are corrected for statistical bias. Returns ------- kurtosis : array The kurtosis of values along an axis. If all values are equal, return -3 for Fisher's definition and 0 for Pearson's definition. Notes ----- For more details about , see .", - "type": "function", - "file_path": "scipy\\scipy\\stats\\_mstats_basic.py", - "ast_data": "FunctionDef name:kurtosis arguments arg:a arg:axis arg:fisher arg:bias Assign Call call:_chk_asarray Assign Call call:mean Assign Call call:_moment Assign Call call:_moment Assign Compare op:LtE With Assign Call call:where If BoolOp Compare op:IsNot Compare op:IsNot Assign Call call:count Assign If Call call:any Assign Call call:extract Assign Call call:extract Assign Call call:extract Assign If Return return:yes Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_xy", - "source_code": "def get_xy(self): return (self._x0, self._y0)", - "docstring": "Return the left and bottom coords of the rectangle as a tuple.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:get_xy arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "size", - "source_code": "@dispatch.dispatch_for_api(array_ops.size_v2) def size(input: ragged_tensor.Ragged, out_type = dtypes.int32, name = None): if ragged_tensor.is_ragged(input): return array_ops.size(input.flat_values, out_type = out_type, name = name) else: return array_ops.size(input, out_type = out_type, name = name)", - "docstring": "Returns the size of a potentially ragged tensor. The size of a ragged tensor is the size of its inner values. #### Example: >>> tf.size(tf.ragged.constant([[1, 2], [3]])).numpy().item() 3 Args: input: A potentially ragged . out_type: The numeric output type for the operation. name: A name for the operation (optional). Returns: A Tensor of type .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ragged\\ragged_array_ops.py", - "ast_data": "FunctionDef name:size arguments arg:input type:ragged_tensor.Ragged arg:out_type arg:name Call call:dispatch_for_api If Call call:is_ragged Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "AvgPool1d", - "source_code": "class AvgPool1d(_AvgPoolNd): kernel_size: _size_1_t stride: _size_1_t padding: _size_1_t ceil_mode: bool count_include_pad: bool def __init__(self, kernel_size: _size_1_t, stride: _size_1_t = None, padding: _size_1_t = 0, ceil_mode: bool = False, count_include_pad: bool = True) -> None: super().__init__() self.kernel_size = _single(kernel_size) self.stride = _single(stride if stride is not None else kernel_size) self.padding = _single(padding) self.ceil_mode = ceil_mode self.count_include_pad = count_include_pad def forward(self, input: Tensor) -> Tensor: return F.avg_pool1d(input, self.kernel_size, self.stride, self.padding, self.ceil_mode, self.count_include_pad)", - "docstring": "Applies a 1D average pooling over an input signal composed of several input planes. In the simplest case, the output value of the layer with input size :math:, output :math: and :attr: :math: can be precisely described as: .. math:: \\text{out}(N_i, C_j, l) = \\frac{1}{k} \\sum_{m=0}^{k-1} \\text{input}(N_i, C_j, \\text{stride} \\times l + m) If :attr: is non-zero, then the input is implicitly zero-padded on both sides for :attr: number of points. 
Note: When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding or the input. Sliding windows that would start in the right padded region are ignored. .. note:: pad should be at most half of effective kernel size. The parameters :attr:, :attr:, :attr: can each be an `kernel_sizeceilfloor(N, C, L_{in})(C, L_{in})(N, C, L_{out})(C, L_{out})(L_{out} - 1) \\times \\text{stride} \\geq L_{in} + \\text{padding}L_{out}` being reduced by one. Examples:: >>> # pool with window of size=3, stride=2 >>> m = nn.AvgPool1d(3, stride=2) >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]])) tensor([[[2., 4., 6.]]])", - "type": "class", - "file_path": "pytorch\\torch\\nn\\modules\\pooling.py", - "ast_data": "ClassDef name:AvgPool1d FunctionDef name:__init__ arguments arg:self arg:kernel_size type:_size_1_t arg:stride type:_size_1_t arg:padding type:_size_1_t arg:ceil_mode type:bool arg:count_include_pad type:bool Assign Call call:_single Assign Call call:_single Assign Call call:_single Assign Assign FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "print_dict", - "source_code": "def print_dict(py_dict): for gpu, cc in py_dict.items(): print('{: <25}{: <25}'.format(gpu, cc))", - "docstring": "Prints dictionary with formatting (2 column table). Args: py_dict: Dictionary that is to be printed out in a table format.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\tools\\tensorflow_builder\\config_detector\\data\\cuda_compute_capability.py", - "ast_data": "FunctionDef name:print_dict arguments arg:py_dict For Call call:items" - }, - { - "library": "pytorch", - "name": "check_and_add_duplicate_pattern", - "source_code": "def check_and_add_duplicate_pattern(pattern: PatternExpr, graph: Optional[torch.fx.Graph], seen_patterns: dict[str, list[Optional[str]]], skip_duplicates: bool = False) -> bool: pattern_repr = PatternPrettyPrinter.run(pattern) equiv_pattern_reprs = seen_patterns.get(pattern_repr) if not equiv_pattern_reprs: seen_patterns[pattern_repr].append(str(graph) if graph else None) return False if graph is None: if skip_duplicates: return True torch._check(False, lambda: f'Duplicate pattern: {pattern_repr} with no graph') new_graph_str = str(graph) for graph_str in equiv_pattern_reprs: if not new_graph_str = = graph_str: continue if skip_duplicates: return True torch._check(False, lambda: f'Duplicate pattern: {pattern_repr} with duplicated match graph {graph_str} ') equiv_pattern_reprs.append(new_graph_str) return False", - "docstring": "Check if a pattern is a duplicate. Because we ignore certain types in searching, but not in matching, use the graph to distinguish equivalent search patterns. Returns True if a duplicate is found and is passed in. 
Errors if is False and a duplicate is found.", - "type": "function", - "file_path": "pytorch\\torch\\_inductor\\pattern_matcher.py", - "ast_data": "FunctionDef name:check_and_add_duplicate_pattern arguments arg:pattern type:PatternExpr arg:graph type:Optional[torch.fx.Graph] arg:seen_patterns type:dict[str, list[Optional[str]]] arg:skip_duplicates type:bool Assign Call call:run Assign Call call:get If Return return:yes If Compare op:Is If Return return:yes Assign Call call:str For If If Return return:yes Return return:yes" - }, - { - "library": "kornia", - "name": "normalize_min_max", - "source_code": "def normalize_min_max(x: Tensor, min_val: float = 0.0, max_val: float = 1.0, eps: float = 1e-06) -> Tensor: if not isinstance(x, Tensor): raise TypeError(f'data should be a tensor. Got: {type(x)}.') if not isinstance(min_val, float): raise TypeError(f\"'min_val' should be a float. Got: {type(min_val)}.\") if not isinstance(max_val, float): raise TypeError(f\"'b' should be a float. Got: {type(max_val)}.\") if len(x.shape) < 3: raise ValueError(f'Input shape must be at least a 3d tensor. Got: {x.shape}.') shape = x.shape B, C = (shape[0], shape[1]) x_min: Tensor = x.view(B, C, -1).min(-1)[0].view(B, C, 1) x_max: Tensor = x.view(B, C, -1).max(-1)[0].view(B, C, 1) x_out: Tensor = (max_val - min_val) * (x.view(B, C, -1) - x_min) / (x_max - x_min + eps) + min_val return x_out.view(shape)", - "docstring": "Normalise an image/video tensor by MinMax and re-scales the value between a range. The data is normalised using the following formulation: .. math:: y_i = (b - a) * \\frac{x_i - \\text{min}(x)}{\\text{max}(x) - \\text{min}(x)} + a where :math: is :math: and :math: is :math:. Args: x: The image tensor to be normalised with shape :math:. min_val: The minimum value for the new range. max_val: The maximum value for the new range. eps: Float number to avoid zero division. Returns: The normalised image tensor with same shape as input :math:. Example: >>> x = torch.rand(1, 5, 3, 3) >>> x_norm = normalize_min_max(x, min_val=-1., max_val=1.) >>> x_norm.min() tensor(-1.) >>> x_norm.max() tensor(1.0000)", - "type": "function", - "file_path": "kornia\\kornia\\enhance\\normalize.py", - "ast_data": "FunctionDef name:normalize_min_max arguments arg:x type:Tensor arg:min_val type:float arg:max_val type:float arg:eps type:float If Raise raises:TypeError(f'data should be a tensor. Got: {type(x)}.') If Raise raises:TypeError(f\"'min_val' should be a float. Got: {type(min_val)}.\") If Raise raises:TypeError(f\"'b' should be a float. Got: {type(max_val)}.\") If Compare op:Lt Raise raises:ValueError(f'Input shape must be at least a 3d tensor. 
Got: {x.shape}.') Assign Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "f", - "source_code": "def f(original_nodes): if not original_graph: return None useful_ops = [] for func, name in original_nodes: try: if not func: useful_ops.append((func, original_graph.get_operation_by_name(name))) else: sub_func = original_graph._get_function(func) if isinstance(sub_func, function.AtomicFunction): useful_ops.append((func, sub_func.graph.get_operation_by_name(name))) else: sys.stderr.write(\"Use '@tf.function' or '@defun' to decorate the function.\\n\") continue except KeyError: continue return _error_interpolation.create_graph_debug_info_def(useful_ops)", - "docstring": "Function to create for the given .", - "type": "function", - "file_path": "tensorflow\\tensorflow\\lite\\python\\util.py", - "ast_data": "FunctionDef name:f arguments arg:original_nodes If Return return:yes Assign For Try If Assign Call call:_get_function If Call call:isinstance ExceptHandler Return return:yes" - }, - { - "library": "django", - "name": "datetime_trunc_sql", - "source_code": "def datetime_trunc_sql(self, lookup_type, sql, params, tzname): raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')", - "docstring": "Given a lookup_type of 'year', 'month', 'day', 'hour', 'minute', or 'second', return the SQL that truncates the given datetime field field_name to a datetime object with only the given specificity.", - "type": "method", - "file_path": "django\\django\\db\\backends\\base\\operations.py", - "ast_data": "FunctionDef name:datetime_trunc_sql arguments arg:self arg:lookup_type arg:sql arg:params arg:tzname Raise raises:NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_trunc_sql() method')" - }, - { - "library": "cherrypy", - "name": "read_into_file", - "source_code": "def read_into_file(self, fp_out = None): if fp_out is None: fp_out = self.make_file() self.read_lines_to_boundary(fp_out = fp_out) return fp_out", - "docstring": "Read the request body into fp_out (or make_file() if None). 
Return fp_out.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\_cpreqbody.py", - "ast_data": "FunctionDef name:read_into_file arguments arg:self arg:fp_out If Compare op:Is Assign Call call:make_file Return return:yes" - }, - { - "library": "tensorflow", - "name": "ZerosLikeForExit", - "source_code": "def ZerosLikeForExit(self, val): val_shape = val.get_shape() forward_ctxt = val.op._get_control_flow_context() outer_forward_ctxt = forward_ctxt.outer_context if outer_forward_ctxt: outer_forward_ctxt = outer_forward_ctxt.GetWhileContext() outer_grad_state = None if outer_forward_ctxt: outer_grad_state = self._map.get(outer_forward_ctxt) if outer_grad_state: if val_shape.is_fully_defined(): outer_grad_state.grad_context.Enter() result = array_ops.zeros(val_shape.dims, val.dtype) outer_grad_state.grad_context.Exit() else: forward_ctxt.outer_context.Enter() shape = array_ops.shape_internal(val, optimize = False) forward_ctxt.outer_context.Exit() history_shape = outer_grad_state.AddForwardAccumulator(shape) outer_grad_ctxt = outer_grad_state.grad_context outer_grad_ctxt.Enter() real_shape = outer_grad_state.AddBackpropAccumulatedValue(history_shape, shape) result = array_ops.zeros(real_shape, val.dtype) outer_grad_ctxt.Exit() elif val_shape.is_fully_defined(): result = array_ops.zeros(val_shape.dims, val.dtype) else: result = array_ops.zeros_like(val, optimize = False) return result", - "docstring": "Create zeros_like gradient for a loop exit. If the result of a loop variable is not used but is involved in computing the result of some needed loop variable, we create a zero-valued tensor that is fed as gradient for the Exit node of that loop variable. Note that val.op is an Exit, and this method must be called in the control flow context where gradients() is called. Args: val: The output tensor of an Exit op. Returns: A zero tensor of the same shape of val.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\control_flow_state.py", - "ast_data": "FunctionDef name:ZerosLikeForExit arguments arg:self arg:val Assign Call call:get_shape Assign Call call:_get_control_flow_context Assign If Assign Call call:GetWhileContext Assign If Assign Call call:get If If Call call:is_fully_defined Assign Call call:zeros Assign Call call:shape_internal Assign Call call:AddForwardAccumulator Assign Assign Call call:AddBackpropAccumulatedValue Assign Call call:zeros If Call call:is_fully_defined Assign Call call:zeros Assign Call call:zeros_like Return return:yes" - }, - { - "library": "tensorflow", - "name": "value", - "source_code": "def value(self): return self._snapshot", - "docstring": "Returns the last snapshot of this variable. You usually do not need to call this method as all ops that need the value of the variable call it automatically through a call. Returns a which holds the value of the variable. You can not assign a new value to this tensor as it is not a reference to the variable. To avoid copies, if the consumer of the returned value is on the same device as the variable, this actually returns the live value of the variable, not a copy. Updates to the variable are seen by the consumer. If the consumer is on a different device it will get a copy of the variable. 
Returns: A containing the value of the variable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\ref_variable.py", - "ast_data": "FunctionDef name:value arguments arg:self Return return:yes" - }, - { - "library": "django", - "name": "is_hidden", - "source_code": "@property def is_hidden(self): return self.field.widget.is_hidden", - "docstring": "Return True if this BoundField's widget is hidden.", - "type": "method", - "file_path": "django\\django\\forms\\boundfield.py", - "ast_data": "FunctionDef name:is_hidden arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "__deepcopy__", - "source_code": "def __deepcopy__(self, memo): with distribute_lib.enter_or_assert_strategy(self._distribute_strategy): new_values = [] for value in self._values: with ops.device(value.device): new_values.append(copy.deepcopy(value, memo)) copied_variable = type(self)(strategy = self._distribute_strategy, values = new_values, aggregation = self._aggregation, var_policy = copy.deepcopy(self._policy, memo)) memo[id(self)] = copied_variable return copied_variable", - "docstring": "Perform a deepcopy of the . Unlike the deepcopy of a regular tf.Variable, this keeps the original strategy and devices of the . To avoid confusion with the behavior of deepcopy on a regular (which does copy into new devices), we only allow a deepcopy of a within its originating strategy scope. Args: memo: The memoization object for . Returns: A deep copy of the current . Raises: RuntimeError: If trying to deepcopy into a different strategy.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\distribute\\values.py", - "ast_data": "FunctionDef name:__deepcopy__ arguments arg:self arg:memo With Assign For With Assign Call Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_definition_directive", - "source_code": "def get_definition_directive(self, node, directive, arg, default): defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ()) if not defs: return default arg_values_found = [] for def_ in defs: if directive in def_.directives and arg in def_.directives[directive]: arg_values_found.append(def_.directives[directive][arg]) if not arg_values_found: return default if len(arg_values_found) = = 1: return arg_values_found[0] first_value = arg_values_found[0] for other_value in arg_values_found[1:]: if not ast_util.matches(first_value, other_value): qn = anno.getanno(node, anno.Basic.QN) raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' % (qn, directive.__name__, arg, parser.unparse(other_value).strip(), parser.unparse(first_value).strip())) return first_value", - "docstring": "Returns the unique directive argument for a symbol. See lang/directives.py for details on directives. Example: # Given a directive in the code: ag.foo_directive(bar, baz=1) # One can write for an AST node Name(id='bar'): get_definition_directive(node, ag.foo_directive, 'baz') Args: node: ast.AST, the node representing the symbol for which the directive argument is needed. directive: Callable[..., Any], the directive to search. arg: str, the directive argument to return. 
default: Any Raises: ValueError: if conflicting annotations have been found", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\autograph\\core\\converter.py", - "ast_data": "FunctionDef name:get_definition_directive arguments arg:self arg:node arg:directive arg:arg arg:default Assign Call call:getanno If Return return:yes Assign For If BoolOp Compare op:In Compare op:In If Return return:yes If Compare op:Eq Return return:yes Assign For If Assign Call call:getanno Raise raises:ValueError('%s has ambiguous annotations for %s(%s): %s, %s' % (qn, directive.__name__, arg, parser.unparse(other_value).strip(), parser.unparse(first_value).strip())) Return return:yes" - }, - { - "library": "scrapy", - "name": "load_object", - "source_code": "def load_object(path: str | Callable[..., Any]) -> Any: if not isinstance(path, str): if callable(path): return path raise TypeError(f'Unexpected argument type, expected string or object, got: {type(path)}') try: dot = path.rindex('.') except ValueError: raise ValueError(f\"Error loading object '{path}': not a full path\") module, name = (path[: dot], path[dot + 1:]) mod = import_module(module) try: obj = getattr(mod, name) except AttributeError: raise NameError(f\"Module '{module}' doesn't define any object named '{name}'\") return obj", - "docstring": "Load an object given its absolute object path, and return it. The object can be the import path of a class, function, variable or an instance, e.g. 'scrapy.downloadermiddlewares.redirect.RedirectMiddleware'. If `` is not a string, but is a callable object, such as a class or a function, then return it as is.", - "type": "function", - "file_path": "scrapy\\scrapy\\utils\\misc.py", - "ast_data": "FunctionDef name:load_object arguments arg:path type:str | Callable[..., Any] If If Call call:callable Return return:yes Raise raises:TypeError(f'Unexpected argument type, expected string or object, got: {type(path)}') Try Assign Call call:rindex ExceptHandler Raise raises:ValueError(f\"Error loading object '{path}': not a full path\") Assign Assign Call call:import_module Try Assign Call call:getattr ExceptHandler Raise raises:NameError(f\"Module '{module}' doesn't define any object named '{name}'\") Return return:yes" - }, - { - "library": "pandas", - "name": "autocorr", - "source_code": "def autocorr(self, lag: int = 1) -> float: return self.corr(cast(Series, self.shift(lag)))", - "docstring": "Compute the lag-N autocorrelation. This method computes the Pearson correlation between the Series and its shifted self. Parameters ---------- lag : int, default 1 Number of lags to apply before performing autocorrelation. Returns ------- float The Pearson correlation between self and self.shift(lag). See Also -------- Series.corr : Compute the correlation between two Series. Series.shift : Shift index by desired number of periods. DataFrame.corr : Compute pairwise correlation of columns. DataFrame.corrwith : Compute pairwise correlation between rows or columns of two DataFrame objects. Notes ----- If the Pearson correlation is not well defined return 'NaN'. Examples -------- >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) >>> s.autocorr() # doctest: +ELLIPSIS 0.10355... >>> s.autocorr(lag=2) # doctest: +ELLIPSIS -0.99999... If the Pearson correlation is not well defined, then 'NaN' is returned. 
>>> s = pd.Series([1, 0, 0, 0]) >>> s.autocorr() nan", - "type": "method", - "file_path": "pandas\\pandas\\core\\series.py", - "ast_data": "FunctionDef name:autocorr arguments arg:self arg:lag type:int Return return:yes" - }, - { - "library": "cherrypy", - "name": "serve_download", - "source_code": "def serve_download(path, name = None): return serve_file(path, 'application/x-download', 'attachment', name)", - "docstring": "Serve 'path' as an application/x-download attachment.", - "type": "function", - "file_path": "cherrypy\\cherrypy\\lib\\static.py", - "ast_data": "FunctionDef name:serve_download arguments arg:path arg:name Return return:yes" - }, - { - "library": "prospector", - "name": "blend_line", - "source_code": "def blend_line(messages: list[Message], blend_combos: Optional[list[list[tuple[str, str]]]] = None) -> list[Message]: blend_combos = blend_combos or BLEND_COMBOS blend_lists: list[list[Message]] = [[] for _ in range(len(blend_combos))] blended: list[Message] = [] for message in messages: key = (message.source, message.code) found = False for blend_combo_idx, blend_combo in enumerate(blend_combos): if key in blend_combo: found = True blend_lists[blend_combo_idx].append(message) if not found: blended.append(message) for blend_combo_idx, blend_list in enumerate(blend_lists): if len(blend_list) = = 0: continue blend_list.sort(key = lambda msg: blend_combos[blend_combo_idx].index((msg.source, msg.code))) if blend_list[0] not in blended: blended.append(blend_list[0]) for now_used in blend_list[1:]: now_used.used = True return [m for m in blended if not getattr(m, 'used', False)]", - "docstring": "Given a list of messages on the same line, blend them together so that we end up with one message per actual problem. Note that we can still return more than one message here if there are two or more different errors for the line.", - "type": "function", - "file_path": "prospector\\prospector\\blender.py", - "ast_data": "FunctionDef name:blend_line arguments arg:messages type:list[Message] arg:blend_combos type:Optional[list[list[tuple[str, str]]]] Assign BoolOp For Assign Assign For Call call:enumerate If Compare op:In Assign If For Call call:enumerate If Compare op:Eq If Compare op:NotIn For Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "evaluate_guards_for_args", - "source_code": "def evaluate_guards_for_args(self, placeholders: Sequence[FakeTensor], args: Sequence[Tensor], *, ignore_static: bool = True) -> bool: code = self.produce_guards_expression(placeholders, ignore_static = ignore_static) if code: return self.evaluate_guards_expression(code, args) return True", - "docstring": "Generate guards for a graph's placeholder values and evaluate the guards with args", - "type": "method", - "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", - "ast_data": "FunctionDef name:evaluate_guards_for_args arguments arg:self arg:placeholders type:Sequence[FakeTensor] arg:args type:Sequence[Tensor] Assign Call call:produce_guards_expression If Return return:yes Return return:yes" - }, - { - "library": "tensorflow", - "name": "deserialize", - "source_code": "def deserialize(proto): _, type_registrations = _REVIVED_TYPE_REGISTRY.get(proto.identifier, (None, None)) if type_registrations is not None: for type_registration in type_registrations: if type_registration.should_load(proto): return (type_registration.from_proto(proto), type_registration.setter) return None", - "docstring": "Create a trackable object from a SavedUserObject proto. 
Args: proto: A SavedUserObject to deserialize. Returns: A tuple of (trackable, assignment_fn) where assignment_fn has the same signature as setattr and should be used to add dependencies to when they are available.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\saved_model\\revived_types.py", - "ast_data": "FunctionDef name:deserialize arguments arg:proto Assign Call call:get If Compare op:IsNot For If Call call:should_load Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "add_to_set", - "source_code": "def add_to_set(self, event_name: str, key: str, value: Any): if event_name not in self.get_stack(): raise RuntimeError(f\"Event {repr(event_name)} not in {self.get_stack()}. Cannot add metadata to events that aren't in progress. Please make sure the event has started and hasn't ended.\") event_data = self.get_event_data() if event_name not in event_data: event_data[event_name] = {} if key not in event_data[event_name]: event_data[event_name][key] = set() event_data[event_name][key].add(value)", - "docstring": "Add a value to a set within a event_name's metadata if it exists", - "type": "method", - "file_path": "pytorch\\torch\\_dynamo\\utils.py", - "ast_data": "FunctionDef name:add_to_set arguments arg:self arg:event_name type:str arg:key type:str arg:value type:Any If Compare op:NotIn Raise raises:RuntimeError(f\"Event {repr(event_name)} not in {self.get_stack()}. Cannot add metadata to events that aren't in progress. Please make sure the event has started and hasn't ended.\") Assign Call call:get_event_data If Compare op:NotIn Assign If Compare op:NotIn Assign Call call:set" - }, - { - "library": "matplotlib", - "name": "set_y", - "source_code": "def set_y(self, y): self._y = y self.stale = True", - "docstring": "Set the bottom coord of the rectangle. 
Parameters ---------- y : float", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\patches.py", - "ast_data": "FunctionDef name:set_y arguments arg:self arg:y Assign Assign" - }, - { - "library": "django", - "name": "create", - "source_code": "def create(self, **kwargs): reverse_one_to_one_fields = frozenset(kwargs).intersection(self.model._meta._reverse_one_to_one_field_names) if reverse_one_to_one_fields: raise ValueError('The following fields do not exist in this model: %s' % ', '.join(reverse_one_to_one_fields)) obj = self.model(**kwargs) self._for_write = True obj.save(force_insert = True, using = self.db) return obj", - "docstring": "Create a new object with the given kwargs, saving it to the database and returning the created object.", - "type": "method", - "file_path": "django\\django\\db\\models\\query.py", - "ast_data": "FunctionDef name:create arguments arg:self kwarg:kwargs Assign Call call:intersection If Raise raises:ValueError('The following fields do not exist in this model: %s' % ', '.join(reverse_one_to_one_fields)) Assign Call call:model Assign Return return:yes" - }, - { - "library": "numpy", - "name": "getargs", - "source_code": "def getargs(co): if not iscode(co): raise TypeError('arg is not a code object') nargs = co.co_argcount names = co.co_varnames args = list(names[: nargs]) for i in range(nargs): if args[i][: 1] in ['', '.']: raise TypeError('tuple function arguments are not supported') varargs = None if co.co_flags & CO_VARARGS: varargs = co.co_varnames[nargs] nargs = nargs + 1 varkw = None if co.co_flags & CO_VARKEYWORDS: varkw = co.co_varnames[nargs] return (args, varargs, varkw)", - "docstring": "Get information about the arguments accepted by a code object. Three things are returned: (args, varargs, varkw), where 'args' is a list of argument names (possibly containing nested lists), and 'varargs' and 'varkw' are the names of the * and ** arguments or None.", - "type": "function", - "file_path": "numpy\\numpy\\_utils\\_inspect.py", - "ast_data": "FunctionDef name:getargs arguments arg:co If Raise raises:TypeError('arg is not a code object') Assign Assign Assign Call call:list For Call call:range If Compare op:In Raise raises:TypeError('tuple function arguments are not supported') Assign If Assign Assign Assign If Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "first_two_eq", - "source_code": "@register_refinement_rule(torch.nn.AdaptiveAvgPool2d) @register_refinement_rule(torch.nn.MaxPool2d) def first_two_eq(n: Node): res = [] assert isinstance(n.args[0], Node) arg_type = n.args[0].type if isinstance(arg_type, TensorType) and isinstance(n.type, TensorType): args1 = arg_type.__args__ args2 = n.type.__args__ res = [Equality(args1[0], args2[0]), Equality(args1[1], args2[1])] return res", - "docstring": "For operations where the first two dimensions of the input and output shape are equal", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\graph_gradual_typechecker.py", - "ast_data": "FunctionDef name:first_two_eq arguments arg:n type:Node Call call:register_refinement_rule Call call:register_refinement_rule Assign Assign If BoolOp Call call:isinstance Call call:isinstance Assign Assign Assign Return return:yes" - }, - { - "library": "sphinx", - "name": "get_matching_files", - "source_code": "def get_matching_files(dirname: str | os.PathLike[str], include_patterns: Iterable[str] = ('**',), exclude_patterns: Iterable[str] = ()) -> Iterator[str]: dirname = Path(dirname).resolve() exclude_matchers = 
compile_matchers(exclude_patterns) include_matchers = compile_matchers(include_patterns) for root, dirs, files in os.walk(dirname, followlinks = True): relative_root = os.path.relpath(root, dirname) if relative_root = = '.': relative_root = '' relative_root_path = Path(relative_root) included_files = [] for entry in sorted(files): entry = _unicode_nfc((relative_root_path / entry).as_posix()) keep = False for matcher in include_matchers: if matcher(entry): keep = True break for matcher in exclude_matchers: if matcher(entry): keep = False break if keep: included_files.append(entry) filtered_dirs = [] for dir_name in sorted(dirs): normalised = _unicode_nfc((relative_root_path / dir_name).as_posix()) for matcher in exclude_matchers: if matcher(normalised): break else: filtered_dirs.append(dir_name) dirs[:] = filtered_dirs yield from included_files", - "docstring": "Get all file names in a directory, recursively. Filter file names by the glob-style include_patterns and exclude_patterns. The default values include all files (\"**\") and exclude nothing (\"\"). Only files matching some pattern in *include_patterns* are included, and exclusions from *exclude_patterns* take priority over inclusions.", - "type": "function", - "file_path": "sphinx\\sphinx\\util\\matching.py", - "ast_data": "FunctionDef name:get_matching_files arguments arg:dirname type:str | os.PathLike[str] arg:include_patterns type:Iterable[str] arg:exclude_patterns type:Iterable[str] Assign Call call:resolve Assign Call call:compile_matchers Assign Call call:compile_matchers For Call call:walk Assign Call call:relpath If Compare op:Eq Assign Assign Call call:Path Assign For Call call:sorted Assign Call call:_unicode_nfc Assign For If Call call:matcher Assign For If Call call:matcher Assign If Assign For Call call:sorted Assign Call call:_unicode_nfc For If Call call:matcher Assign" - }, - { - "library": "tensorflow", - "name": "minimum", - "source_code": "@dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def minimum(x, y): return math_ops.minimum(x, y)", - "docstring": "Element-wise minimum of two tensors. Args: x: Tensor or variable. y: Tensor or variable. 
Returns: A tensor.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\backend.py", - "ast_data": "FunctionDef name:minimum arguments arg:x arg:y Return return:yes" - }, - { - "library": "flexx", - "name": "run", - "source_code": "def run(): server = current_server() server._auto_stop = True return start()", - "docstring": "Start the event loop in desktop app mode; the server will close down when there are no more connections.", - "type": "function", - "file_path": "flexx\\flexx\\app\\_funcs.py", - "ast_data": "FunctionDef name:run arguments Assign Call call:current_server Assign Return return:yes" - }, - { - "library": "pytorch", - "name": "clear_path_state", - "source_code": "def clear_path_state(self) -> None: pass", - "docstring": "Clear the path state in this current executing node", - "type": "method", - "file_path": "pytorch\\torch\\_inductor\\cudagraph_trees.py", - "ast_data": "FunctionDef name:clear_path_state arguments arg:self" - }, - { - "library": "tensorflow", - "name": "get_real_batch_size", - "source_code": "def get_real_batch_size(self, dataset_batch): if isinstance(dataset_batch, (tuple, list)): dataset_batch = dataset_batch[0] assert nest.flatten(dataset_batch) def _find_any_tensor(batch_features): tensors = [x for x in nest.flatten(batch_features) if tensor_util.is_tf_type(x)] if not tensors: raise ValueError('Cannot find any Tensor in features dict.') return tensors[0] return backend.cast(backend.shape(_find_any_tensor(dataset_batch))[0], dtype = 'int64')", - "docstring": "Returns the number of elements in a potentially partial batch.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\engine\\partial_batch_padding_handler.py", - "ast_data": "FunctionDef name:get_real_batch_size arguments arg:self arg:dataset_batch If Call call:isinstance Assign FunctionDef name:_find_any_tensor arguments arg:batch_features Assign If Raise raises:ValueError('Cannot find any Tensor in features dict.') Return return:yes Return return:yes" - }, - { - "library": "cherrypy", - "name": "index", - "source_code": "@cherrypy.expose def index(self): return '\\n \\n
Upload a file \\n \\n filename: \\n \\n \\n Download a file
\\n This one\\n \\n '", - "docstring": "Produce HTTP response body of file upload app index URI.", - "type": "method", - "file_path": "cherrypy\\cherrypy\\tutorial\\tut09_files.py", - "ast_data": "FunctionDef name:index arguments arg:self Return return:yes" - }, - { - "library": "numpy", - "name": "hermepow", - "source_code": "def hermepow(c, pow, maxpower = 16): return pu._pow(hermemul, c, pow, maxpower)", - "docstring": "Raise a Hermite series to a power. Returns the Hermite series raised to the power . The argument is a sequence of coefficients ordered from low to high. i.e., [1,2,3] is the series `` Parameters ---------- c : array_like 1-D array of Hermite series coefficients ordered from low to high. pow : integer Power to which the series will be raised maxpower : integer, optional Maximum power allowed. This is mainly to limit growth of the series to unmanageable size. Default is 16 Returns ------- coef : ndarray Hermite series of power. See Also -------- hermeadd, hermesub, hermemulx, hermemul, hermediv Examples -------- >>> from numpy.polynomial.hermite_e import hermepow >>> hermepow([1, 2, 3], 2) array([23., 28., 46., 12., 9.])", - "type": "function", - "file_path": "numpy\\numpy\\polynomial\\hermite_e.py", - "ast_data": "FunctionDef name:hermepow arguments arg:c arg:pow arg:maxpower Return return:yes" - }, - { - "library": "tensorflow", - "name": "deferred_internal_captures", - "source_code": "@property def deferred_internal_captures(self): return list(self._function_captures.by_ref_internal.values())", - "docstring": "List of nest of placeholders which at call time will be fed.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", - "ast_data": "FunctionDef name:deferred_internal_captures arguments arg:self Return return:yes" - }, - { - "library": "algorithms", - "name": "substitute", - "source_code": "def substitute(self, substitutions: Union[int, float, Fraction, Dict[int, Union[int, float, Fraction]]]) -> Fraction: if isinstance(substitutions, int) or isinstance(substitutions, float) or isinstance(substitutions, Fraction): substitutions = {v: Monomial._rationalize_if_possible(substitutions) for v in self.all_variables()} elif not self.all_variables().issubset(set(substitutions.keys())): raise ValueError(\"Some variables didn't receive their values.\") if self.coeff = = 0: return Fraction(0, 1) ans = Monomial._rationalize_if_possible(self.coeff) for k in self.variables: ans * = Monomial._rationalize_if_possible(substitutions[k] ** self.variables[k]) return Monomial._rationalize_if_possible(ans)", - "docstring": "Substitute the variables in the monomial for values defined by the substitutions dictionary.", - "type": "method", - "file_path": "algorithms\\algorithms\\maths\\polynomial.py", - "ast_data": "FunctionDef name:substitute arguments arg:self arg:substitutions type:Union[int, float, Fraction, Dict[int, Union[int, float, Fraction]]] If BoolOp Call call:isinstance Call call:isinstance Call call:isinstance Assign If Raise raises:ValueError(\"Some variables didn't receive their values.\") If Compare op:Eq Return return:yes Assign Call call:_rationalize_if_possible For Return return:yes" - }, - { - "library": "tensorflow", - "name": "__init__", - "source_code": "def __init__(self, keys, values, key_dtype = None, value_dtype = None, name = None): if not context.executing_eagerly() and ops.get_default_graph()._get_control_flow_context() is not None: with ops.init_scope(): self._keys = ops.convert_to_tensor(keys, dtype = key_dtype, name 
= 'keys') self._values = ops.convert_to_tensor(values, dtype = value_dtype, name = 'values') else: self._keys = ops.convert_to_tensor(keys, dtype = key_dtype, name = 'keys') self._values = ops.convert_to_tensor(values, dtype = value_dtype, name = 'values') self._name = name if name is not None else 'key_value_init' if context.executing_eagerly(): self._name + = str(ops.uid()) super(KeyValueTensorInitializer, self).__init__(self._keys.dtype, self._values.dtype)", - "docstring": "Constructs a table initializer object based on keys and values tensors. Args: keys: The tensor for the keys. values: The tensor for the values. key_dtype: The data type. Used when is a python array. value_dtype: The data type. Used when is a python array. name: A name for the operation (optional).", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\ops\\lookup_ops.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:keys arg:values arg:key_dtype arg:value_dtype arg:name If BoolOp Compare op:IsNot With Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign Call call:convert_to_tensor Assign If Call call:executing_eagerly" - }, - { - "library": "coconut", - "name": "modify_add_code_before", - "source_code": "def modify_add_code_before(self, add_code_before_names, code_modifier): for name in add_code_before_names: self.add_code_before[name] = code_modifier(self.add_code_before[name]) replacement = self.add_code_before_replacements.get(name) if replacement is not None: self.add_code_before_replacements[name] = code_modifier(replacement)", - "docstring": "Apply code_modifier to all the code corresponding to add_code_before_names.", - "type": "method", - "file_path": "coconut\\coconut\\compiler\\compiler.py", - "ast_data": "FunctionDef name:modify_add_code_before arguments arg:self arg:add_code_before_names arg:code_modifier For Assign Call call:code_modifier Assign Call call:get If Compare op:IsNot Assign Call call:code_modifier" - }, - { - "library": "pytorch", - "name": "forward", - "source_code": "def forward(self, x: torch.Tensor) -> torch.Tensor: weight_quant_dequant = self.get_weight() result = F.conv1d(x, weight_quant_dequant, self.bias, self.stride, self.padding, self.dilation, self.groups) return result", - "docstring": "we have: w(float) -- quant - dequant x(float) ------------- F.conv1d --- In the full model, we will see w(float) -- quant - *dequant x -- quant --- *dequant -- *F.conv1d --- *quant - dequant and the backend should be able to fuse the ops with into a quantized conv1d", - "type": "method", - "file_path": "pytorch\\torch\\ao\\nn\\quantized\\reference\\modules\\conv.py", - "ast_data": "FunctionDef name:forward arguments arg:self arg:x type:torch.Tensor Assign Call call:get_weight Assign Call call:conv1d Return return:yes" - }, - { - "library": "numpy", - "name": "check_funcs_once", - "source_code": "def check_funcs_once(self, funcs, headers = None, include_dirs = None, libraries = None, library_dirs = None, decl = False, call = False, call_args = None): self._check_compiler() body = [] if decl: for f, v in decl.items(): if v: body.append('int %s (void);' % f) body.append('#ifdef _MSC_VER') for func in funcs: body.append('#pragma function(%s)' % func) body.append('#endif') body.append('int main (void) {') if call: for f in funcs: if f in call and call[f]: if not (call_args and f in call_args and call_args[f]): args = '' else: args = call_args[f] body.append(' %s(%s);' % (f, args)) else: body.append(' %s;' % f) 
else: for f in funcs: body.append(' %s;' % f) body.append(' return 0;') body.append('}') body = '\\n'.join(body) + '\\n' return self.try_link(body, headers, include_dirs, libraries, library_dirs)", - "docstring": "Check a list of functions at once. This is useful to speed up things, since all the functions in the funcs list will be put in one compilation unit. Arguments --------- funcs : seq list of functions to test include_dirs : seq list of header paths libraries : seq list of libraries to link the code snippet to library_dirs : seq list of library paths decl : dict for every (key, value), the declaration in the value will be used for function in key. If a function is not in the dictionary, no declaration will be used. call : dict for every item (f, value), if the value is True, a call will be done to the function f.", - "type": "method", - "file_path": "numpy\\numpy\\distutils\\command\\config.py", - "ast_data": "FunctionDef name:check_funcs_once arguments arg:self arg:funcs arg:headers arg:include_dirs arg:libraries arg:library_dirs arg:decl arg:call arg:call_args Assign If For Call call:items If For If For If BoolOp Compare op:In If Assign Assign For Assign Return return:yes" - }, - { - "library": "scipy", - "name": "rmatmat", - "source_code": "def rmatmat(self, X): if not (issparse(X) or is_pydata_spmatrix(X)): X = np.asanyarray(X) if X.ndim ! = 2: raise ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d') if X.shape[0] ! = self.shape[0]: raise ValueError(f'dimension mismatch: {self.shape}, {X.shape}') try: Y = self._rmatmat(X) except Exception as e: if issparse(X) or is_pydata_spmatrix(X): raise TypeError('Unable to multiply a LinearOperator with a sparse matrix. Wrap the matrix in aslinearoperator() first.') from e raise if isinstance(Y, np.matrix): Y = asmatrix(Y) return Y", - "docstring": "Adjoint matrix-matrix multiplication. Performs the operation y = A^H @ x where A is an MxN linear operator and x is a column vector or 1-d array, or 2-d array. The default implementation defers to the adjoint. Parameters ---------- X : {matrix, ndarray} A matrix or 2D array. Returns ------- Y : {matrix, ndarray} A matrix or 2D array depending on the type of the input. Notes ----- This rmatmat wraps the user-specified rmatmat routine.", - "type": "method", - "file_path": "scipy\\scipy\\sparse\\linalg\\_interface.py", - "ast_data": "FunctionDef name:rmatmat arguments arg:self arg:X If Assign Call call:asanyarray If Compare op:NotEq Raise raises:ValueError(f'expected 2-d ndarray or matrix, not {X.ndim}-d') If Compare op:NotEq Raise raises:ValueError(f'dimension mismatch: {self.shape}, {X.shape}') Try Assign Call call:_rmatmat ExceptHandler If BoolOp Call call:issparse Call call:is_pydata_spmatrix Raise raises:TypeError('Unable to multiply a LinearOperator with a sparse matrix. 
Wrap the matrix in aslinearoperator() first.') Raise If Call call:isinstance Assign Call call:asmatrix Return return:yes" - }, - { - "library": "django", - "name": "get_units", - "source_code": "@classmethod def get_units(cls, wkt): return gdal.SpatialReference(wkt).units", - "docstring": "Return a tuple of (unit_value, unit_name) for the given WKT without using any of the database fields.", - "type": "method", - "file_path": "django\\django\\contrib\\gis\\db\\backends\\base\\models.py", - "ast_data": "FunctionDef name:get_units arguments arg:cls arg:wkt Return return:yes" - }, - { - "library": "scipy", - "name": "ExcitingMixing", - "source_code": "class ExcitingMixing(GenericBroyden): def __init__(self, alpha = None, alphamax = 1.0): GenericBroyden.__init__(self) self.alpha = alpha self.alphamax = alphamax self.beta = None def setup(self, x, F, func): GenericBroyden.setup(self, x, F, func) self.beta = np.full((self.shape[0],), self.alpha, dtype = self.dtype) def solve(self, f, tol = 0): return -f * self.beta def matvec(self, f): return -f / self.beta def rsolve(self, f, tol = 0): return -f * self.beta.conj() def rmatvec(self, f): return -f / self.beta.conj() def todense(self): return np.diag(-1 / self.beta) def _update(self, x, f, dx, df, dx_norm, df_norm): incr = f * self.last_f > 0 self.beta[incr] + = self.alpha self.beta[~incr] = self.alpha np.clip(self.beta, 0, self.alphamax, out = self.beta)", - "docstring": "Find a root of a function, using a tuned diagonal Jacobian approximation. The Jacobian matrix is diagonal and is tuned on each iteration. .. warning:: This algorithm may be useful for specific problems, but whether it will work may depend strongly on the problem. See Also -------- root : Interface to root finding algorithms for multivariate functions. See ``. %(params_extra)s", - "type": "class", - "file_path": "scipy\\scipy\\optimize\\_nonlin.py", - "ast_data": "ClassDef name:ExcitingMixing FunctionDef name:__init__ arguments arg:self arg:alpha arg:alphamax Assign Assign Assign FunctionDef name:setup arguments arg:self arg:x arg:F arg:func Assign Call call:full FunctionDef name:solve arguments arg:self arg:f arg:tol Return return:yes FunctionDef name:matvec arguments arg:self arg:f Return return:yes FunctionDef name:rsolve arguments arg:self arg:f arg:tol Return return:yes FunctionDef name:rmatvec arguments arg:self arg:f Return return:yes FunctionDef name:todense arguments arg:self Return return:yes FunctionDef name:_update arguments arg:self arg:x arg:f arg:dx arg:df arg:dx_norm arg:df_norm Assign Compare op:Gt Assign" - }, - { - "library": "matplotlib", - "name": "contourf", - "source_code": "@_preprocess_data() @_docstring.interpd def contourf(self, *args, **kwargs): kwargs['filled'] = True contours = mcontour.QuadContourSet(self, *args, **kwargs) self._request_autoscale_view() return contours", - "docstring": "Plot filled contours. Call signature:: contourf([X, Y,] Z, /, [levels], **kwargs) The arguments *X*, *Y*, *Z* are positional-only. 
%(contour_doc)s", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_axes.py", - "ast_data": "FunctionDef name:contourf arguments arg:self vararg:args kwarg:kwargs Call call:_preprocess_data Assign Assign Call call:QuadContourSet Return return:yes" - }, - { - "library": "pytorch", - "name": "fake_tensor_unsupported", - "source_code": "def fake_tensor_unsupported(fn): @functools.wraps(fn) def wrapper(model, inputs, **kwargs): with _disable_current_modes(): inputs = list(map(defake, inputs)) return fn(model, inputs, **kwargs) return wrapper", - "docstring": "Decorator for backends that need real inputs. We swap out fake tensors for zero tensors.", - "type": "function", - "file_path": "pytorch\\torch\\_dynamo\\backends\\common.py", - "ast_data": "FunctionDef name:fake_tensor_unsupported arguments arg:fn FunctionDef name:wrapper arguments arg:model arg:inputs kwarg:kwargs Call call:wraps With Assign Call call:list Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "UninitializedBuffer", - "source_code": "class UninitializedBuffer(UninitializedTensorMixin, torch.Tensor): cls_to_become = torch.Tensor def __new__(cls, requires_grad = False, device = None, dtype = None, persistent = True) -> None: factory_kwargs = {'device': device, 'dtype': dtype} data = torch.empty(0, **factory_kwargs) ret = torch.Tensor._make_subclass(cls, data, requires_grad) ret.persistent = persistent ret._is_buffer = True return ret", - "docstring": "A buffer that is not initialized. Uninitialized Buffer is a a special case of :class: where the shape of the data is still unknown. Unlike a :class:, uninitialized parameters hold no data and attempting to access some properties, like their shape, will throw a runtime error. The only operations that can be performed on a uninitialized parameter are changing its datatype, moving it to a different device and converting it to a regular :class:. The default device or dtype to use when the buffer is materialized can be set during construction using e.g. ``.", - "type": "class", - "file_path": "pytorch\\torch\\nn\\parameter.py", - "ast_data": "ClassDef name:UninitializedBuffer Assign FunctionDef name:__new__ arguments arg:cls arg:requires_grad arg:device arg:dtype arg:persistent Assign Assign Call call:empty Assign Call call:_make_subclass Assign Assign Return return:yes" - }, - { - "library": "scipy", - "name": "kronsum", - "source_code": "def kronsum(A, B, format = None): if isinstance(A, sparray) or isinstance(B, sparray): coo_sparse = coo_array identity_sparse = eye_array else: coo_sparse = coo_matrix identity_sparse = identity A = coo_sparse(A) B = coo_sparse(B) if A.ndim ! = 2: raise ValueError(f'kronsum requires 2D inputs. `A` is {A.ndim}D.') if B.ndim ! = 2: raise ValueError(f'kronsum requires 2D inputs. `B` is {B.ndim}D.') if A.shape[0] ! = A.shape[1]: raise ValueError('A is not square') if B.shape[0] ! = B.shape[1]: raise ValueError('B is not square') dtype = upcast(A.dtype, B.dtype) I_n = identity_sparse(A.shape[0], dtype = dtype) I_m = identity_sparse(B.shape[0], dtype = dtype) L = kron(I_m, A, format = 'coo') R = kron(B, I_n, format = 'coo') return (L + R).asformat(format)", - "docstring": "kronecker sum of square sparse matrices A and B Kronecker sum of two sparse matrices is a sum of two Kronecker products kron(I_n,A) + kron(B,I_m) where A has shape (m,m) and B has shape (n,n) and I_m and I_n are identity matrices of shape (m,m) and (n,n), respectively. 
Parameters ---------- A square matrix B square matrix format : str format of the result (e.g. \"csr\") Returns ------- kronecker sum in a sparse matrix format", - "type": "function", - "file_path": "scipy\\scipy\\sparse\\_construct.py", - "ast_data": "FunctionDef name:kronsum arguments arg:A arg:B arg:format If BoolOp Call call:isinstance Call call:isinstance Assign Assign Assign Assign Assign Call call:coo_sparse Assign Call call:coo_sparse If Compare op:NotEq Raise raises:ValueError(f'kronsum requires 2D inputs. `A` is {A.ndim}D.') If Compare op:NotEq Raise raises:ValueError(f'kronsum requires 2D inputs. `B` is {B.ndim}D.') If Compare op:NotEq Raise raises:ValueError('A is not square') If Compare op:NotEq Raise raises:ValueError('B is not square') Assign Call call:upcast Assign Call call:identity_sparse Assign Call call:identity_sparse Assign Call call:kron Assign Call call:kron Return return:yes" - }, - { - "library": "matplotlib", - "name": "format_pct", - "source_code": "def format_pct(self, x, display_range): x = self.convert_to_pct(x) if self.decimals is None: scaled_range = self.convert_to_pct(display_range) if scaled_range < = 0: decimals = 0 else: decimals = math.ceil(2.0 - math.log10(2.0 * scaled_range)) if decimals > 5: decimals = 5 elif decimals < 0: decimals = 0 else: decimals = self.decimals s = f'{x: 0.{int(decimals)}f}' return s + self.symbol", - "docstring": "Format the number as a percentage number with the correct number of decimals and adds the percent symbol, if any. If `None` => 34.50% ... ... ... ============= ======== ======================= This method will not be very good for tiny axis ranges or extremely large ones. It assumes that the values on the chart are percentages displayed on a reasonable scale.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\ticker.py", - "ast_data": "FunctionDef name:format_pct arguments arg:self arg:x arg:display_range Assign Call call:convert_to_pct If Compare op:Is Assign Call call:convert_to_pct If Compare op:LtE Assign Assign Call call:ceil If Compare op:Gt Assign If Compare op:Lt Assign Assign Assign Return return:yes" - }, - { - "library": "kornia", - "name": "SpatialSoftArgmax2d", - "source_code": "class SpatialSoftArgmax2d(Module): def __init__(self, temperature: Optional[Tensor] = None, normalized_coordinates: bool = True) -> None: super().__init__() if temperature is None: temperature = tensor(1.0) self.temperature: Tensor = temperature self.normalized_coordinates: bool = normalized_coordinates def __repr__(self) -> str: return f'{self.__class__.__name__}temperature = {self.temperature}, normalized_coordinates = {self.normalized_coordinates})' def forward(self, input: Tensor) -> Tensor: return spatial_soft_argmax2d(input, self.temperature, self.normalized_coordinates)", - "docstring": "Compute the Spatial Soft-Argmax 2D of a given heatmap. 
See :func: for details.", - "type": "class", - "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py", - "ast_data": "ClassDef name:SpatialSoftArgmax2d FunctionDef name:__init__ arguments arg:self arg:temperature type:Optional[Tensor] arg:normalized_coordinates type:bool If Compare op:Is Assign Call call:tensor FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:input type:Tensor Return return:yes" - }, - { - "library": "django", - "name": "change_aliases", - "source_code": "def change_aliases(self, change_map): if not change_map: return self assert set(change_map).isdisjoint(change_map.values()) self.where.relabel_aliases(change_map) if isinstance(self.group_by, tuple): self.group_by = tuple([col.relabeled_clone(change_map) for col in self.group_by]) self.select = tuple([col.relabeled_clone(change_map) for col in self.select]) self.annotations = self.annotations and {key: col.relabeled_clone(change_map) for key, col in self.annotations.items()} for old_alias, new_alias in change_map.items(): if old_alias not in self.alias_map: continue alias_data = self.alias_map[old_alias].relabeled_clone(change_map) self.alias_map[new_alias] = alias_data self.alias_refcount[new_alias] = self.alias_refcount[old_alias] del self.alias_refcount[old_alias] del self.alias_map[old_alias] table_aliases = self.table_map[alias_data.table_name] for pos, alias in enumerate(table_aliases): if alias = = old_alias: table_aliases[pos] = new_alias break self.external_aliases = {change_map.get(alias, alias): aliased or alias in change_map for alias, aliased in self.external_aliases.items()} for combined_query in self.combined_queries: external_change_map = {alias: aliased for alias, aliased in change_map.items() if alias in combined_query.external_aliases} combined_query.change_aliases(external_change_map)", - "docstring": "Change the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\query.py", - "ast_data": "FunctionDef name:change_aliases arguments arg:self arg:change_map If Return return:yes If Call call:isinstance Assign Call call:tuple Assign Call call:tuple Assign BoolOp For Call call:items If Compare op:NotIn Assign Call call:relabeled_clone Assign Assign Assign For Call call:enumerate If Compare op:Eq Assign Assign For Assign" - }, - { - "library": "scipy", - "name": "Michalewicz", - "source_code": "class Michalewicz(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([0.0] * self.N, [pi] * self.N)) self.global_optimum = [[2.20290555, 1.570796]] self.fglob = -1.8013 def fun(self, x, *args): self.nfev + = 1 m = 10.0 i = arange(1, self.N + 1) return -sum(sin(x) * sin(i * x ** 2 / pi) ** (2 * m))", - "docstring": "Michalewicz objective function. This class defines the Michalewicz [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{Michalewicz}}(x) = - \\sum_{i=1}^{2} \\sin\\left(x_i\\right) \\sin^{2 m}\\left(\\frac{i x_i^{2}}{\\pi}\\right) Where, in this exercise, :math:. with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Adorio, E. 
MVF - \"Multivariate Test Functions Library in C for Unconstrained Global Optimization\", 2005 TODO: could change dimensionality, but global minimum might change.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_M.py", - "ast_data": "ClassDef name:Michalewicz FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Assign Assign Call call:arange Return return:yes" - }, - { - "library": "pytorch", - "name": "NnapiInterfaceWrapper", - "source_code": "class NnapiInterfaceWrapper(torch.nn.Module): def __init__(self, mod): super().__init__() self.mod = mod", - "docstring": "NNAPI list-ifying and de-list-ifying wrapper. NNAPI always expects a list of inputs and provides a list of outputs. This module allows us to accept inputs as separate arguments. It returns results as either a single tensor or tuple, matching the original module.", - "type": "class", - "file_path": "pytorch\\torch\\backends\\_nnapi\\prepare.py", - "ast_data": "ClassDef name:NnapiInterfaceWrapper FunctionDef name:__init__ arguments arg:self arg:mod Assign" - }, - { - "library": "scikit-learn", - "name": "fit_predict", - "source_code": "def fit_predict(self, X, y = None): return super().fit_predict(X, y)", - "docstring": "Fit and return the result of each sample's clustering assignment. In addition to fitting, this method also return the result of the clustering assignment for each sample in the training set. Parameters ---------- X : array-like of shape (n_samples, n_features) or (n_samples, n_samples) Training instances to cluster, or distances between instances if ``. y : Ignored Not used, present here for API consistency by convention. Returns ------- labels : ndarray of shape (n_samples,) Cluster labels.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\cluster\\_agglomerative.py", - "ast_data": "FunctionDef name:fit_predict arguments arg:self arg:X arg:y Return return:yes" - }, - { - "library": "pandas", - "name": "lazy_load_stub_copy", - "source_code": "def lazy_load_stub_copy(text): global copy, paste copy, paste = determine_clipboard() return copy(text)", - "docstring": "A stub function for copy(), which will load the real copy() function when called so that the real copy() function is used for later calls. This allows users to import pyperclip without having determine_clipboard() automatically run, which will automatically select a clipboard mechanism. This could be a problem if it selects, say, the memory-heavy PyQt4 module but the user was just going to immediately call set_clipboard() to use a different clipboard mechanism. The lazy loading this stub function implements gives the user a chance to call set_clipboard() to pick another clipboard mechanism. 
Or, if the user simply calls copy() or paste() without calling set_clipboard() first, will fall back on whatever clipboard mechanism that determine_clipboard() automatically chooses.", - "type": "function", - "file_path": "pandas\\pandas\\io\\clipboard\\__init__.py", - "ast_data": "FunctionDef name:lazy_load_stub_copy arguments arg:text Assign Call call:determine_clipboard Return return:yes" - }, - { - "library": "tensorflow", - "name": "restore", - "source_code": "def restore(self, restored_tensors, restored_shapes): restored_tensor = restored_tensors[0] if restored_shapes is not None: restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0]) with ops.device(self._var_device): restored_tensor = array_ops.identity(restored_tensor) try: assigned_variable = resource_variable_ops.shape_safe_assign_variable_handle(self.handle_op, self._var_shape, restored_tensor) except ValueError as e: raise ValueError(f'Received incompatible tensor with shape {restored_tensor.shape} when attempting to restore variable with shape {self._var_shape} and name {self.name}.') from e return assigned_variable", - "docstring": "Restores tensors. Raises ValueError if incompatible shape found.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\training\\saving\\saveable_object_util.py", - "ast_data": "FunctionDef name:restore arguments arg:self arg:restored_tensors arg:restored_shapes Assign If Compare op:IsNot Assign Call call:reshape With Assign Call call:identity Try Assign Call call:shape_safe_assign_variable_handle ExceptHandler Raise raises:ValueError(f'Received incompatible tensor with shape {restored_tensor.shape} when attempting to restore variable with shape {self._var_shape} and name {self.name}.') Return return:yes" - }, - { - "library": "tensorflow", - "name": "set_and_validate_functions", - "source_code": "def set_and_validate_functions(self, function_dict): for key in self.all_functions: if key in function_dict: if function_dict[key] is not None and (not isinstance(function_dict[key], (def_function.Function, save_impl.LayerCall))): raise ValueError('Function dictionary contained a non-function object: {} (for key {})'.format(function_dict[key], key)) fn = function_dict[key] self._function_dict[key] = fn tf_fn = fn.wrapped_call if isinstance(fn, save_impl.LayerCall) else fn setattr(self._keras_trackable, key, tf_fn) else: raise ValueError('Function {} missing from serialized function dict.'.format(key)) return self.functions", - "docstring": "Saves function dictionary, and validates dictionary values.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", - "ast_data": "FunctionDef name:set_and_validate_functions arguments arg:self arg:function_dict For If Compare op:In If BoolOp Compare op:IsNot Raise raises:ValueError('Function dictionary contained a non-function object: {} (for key {})'.format(function_dict[key], key)) Assign Assign Assign Raise raises:ValueError('Function {} missing from serialized function dict.'.format(key)) Return return:yes" - }, - { - "library": "pytorch", - "name": "TraceWrappedHigherOrderOperatorVariable", - "source_code": "class TraceWrappedHigherOrderOperatorVariable(TorchHigherOrderOperatorVariable): def call_function(self, tx: 'InstructionTranslator', args: 'list[VariableTracker]', kwargs: 'dict[str, VariableTracker]') -> 'VariableTracker': kwargs = dict(kwargs) fn = kwargs.pop('fn') return fn.call_function(tx, args, kwargs)", - "docstring": "Handles 
torch._dynamo._trace_wrapped_higher_order_op.inner_trace by unwrapping the higher order op and inlining through it. This op is created by dynamo to survive through AotAutograd, then unwrapped here in the call to dynamo from compiled autograd.", - "type": "class", - "file_path": "pytorch\\torch\\_dynamo\\variables\\higher_order_ops.py", - "ast_data": "ClassDef name:TraceWrappedHigherOrderOperatorVariable FunctionDef name:call_function arguments arg:self arg:tx type:'InstructionTranslator' arg:args type:'list[VariableTracker]' arg:kwargs type:'dict[str, VariableTracker]' Assign Call call:dict Assign Call call:pop Return return:yes" - }, - { - "library": "pandas", - "name": "nbytes", - "source_code": "@property def nbytes(self) -> int: raise AbstractMethodError(self)", - "docstring": "The number of bytes needed to store this object in memory. See Also -------- ExtensionArray.shape: Return a tuple of the array dimensions. ExtensionArray.size: The number of elements in the array. Examples -------- >>> pd.array([1, 2, 3]).nbytes 27", - "type": "method", - "file_path": "pandas\\pandas\\core\\arrays\\base.py", - "ast_data": "FunctionDef name:nbytes arguments arg:self Raise raises:AbstractMethodError(self)" - }, - { - "library": "tensorflow", - "name": "trainable_variables", - "source_code": "@property def trainable_variables(self): return tuple((v for v in self.variables if v.trainable))", - "docstring": "A sequence of trainable variables accessed by this FuncGraph. Note that functions keep only weak references to variables. Calling the function after a variable it accesses has been deleted is an error. Returns: Sequence of trainable variables for this func graph.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\framework\\func_graph.py", - "ast_data": "FunctionDef name:trainable_variables arguments arg:self Return return:yes" - }, - { - "library": "feincms", - "name": "UnpackTemplateResponse", - "source_code": "class UnpackTemplateResponse(TemplateResponse): _feincms_unpack = True", - "docstring": "Completely the same as marking applicationcontent-contained views with the `` decorator.", - "type": "class", - "file_path": "feincms\\feincms\\content\\application\\models.py", - "ast_data": "ClassDef name:UnpackTemplateResponse Assign" - }, - { - "library": "pytorch", - "name": "ones_", - "source_code": "def ones_(tensor: Tensor) -> Tensor: return _no_grad_fill_(tensor, 1.0)", - "docstring": "Fill the input Tensor with the scalar value . 
Args: tensor: an n-dimensional Examples: >>> w = torch.empty(3, 5) >>> nn.init.ones_(w)", - "type": "function", - "file_path": "pytorch\\torch\\nn\\init.py", - "ast_data": "FunctionDef name:ones_ arguments arg:tensor type:Tensor Return return:yes" - }, - { - "library": "mongo", - "name": "write", - "source_code": "def write(self, data: Any) -> None: if self._closed: raise ValueError('cannot write to a closed file') try: read = data.read except AttributeError: if not isinstance(data, (str, bytes)): raise TypeError('can only write strings or file-like objects') from None if isinstance(data, str): try: data = data.encode(self.encoding) except AttributeError: raise TypeError('must specify an encoding for file in order to write str') from None read = io.BytesIO(data).read if inspect.iscoroutinefunction(read): self._write_async(read) else: if self._buffer.tell() > 0: space = self.chunk_size - self._buffer.tell() if space: try: to_write = read(space) except BaseException: self.abort() raise self._buffer.write(to_write) if len(to_write) < space: return self._flush_buffer() to_write = read(self.chunk_size) while to_write and len(to_write) = = self.chunk_size: self._flush_data(to_write) to_write = read(self.chunk_size) self._buffer.write(to_write)", - "docstring": "Write data to the file. There is no return value. can be either a string of bytes or a file-like object (implementing :meth:). If the file has an :attr: attribute, can also be a :class: instance, which will be encoded as :attr: before being written. Due to buffering, the data may not actually be written to the database until the :meth: method is called. Raises :class: if this file is already closed. Raises :class: if is not an instance of :class:, a file-like object, or an instance of :class:. Unicode data is only allowed if the file has an :attr: attribute. :param data: string of bytes or file-like object to be written to the file", - "type": "method", - "file_path": "mongo\\gridfs\\synchronous\\grid_file.py", - "ast_data": "FunctionDef name:write arguments arg:self arg:data type:Any If Raise raises:ValueError('cannot write to a closed file') Try Assign ExceptHandler If Raise raises:TypeError('can only write strings or file-like objects') If Call call:isinstance Try Assign Call call:encode ExceptHandler Raise raises:TypeError('must specify an encoding for file in order to write str') Assign If Call call:iscoroutinefunction If Compare op:Gt Assign If Try Assign Call call:read ExceptHandler Raise If Compare op:Lt Return return:no Assign Call call:read While BoolOp Compare op:Eq Assign Call call:read" - }, - { - "library": "tensorflow", - "name": "RNNAttributes", - "source_code": "class RNNAttributes(SerializedAttributes.with_attributes('RNNAttributes', checkpointable_objects = ['states'], copy_from = [LayerAttributes])): pass", - "docstring": "RNN checkpointable objects + functions that are saved to the SavedModel. List of all attributes: All attributes from LayerAttributes (including CommonEndpoints) states: List of state variables", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\saved_model\\serialized_attributes.py", - "ast_data": "ClassDef name:RNNAttributes Call call:with_attributes" - }, - { - "library": "django", - "name": "from_string", - "source_code": "def from_string(self, template_code): raise NotImplementedError('subclasses of BaseEngine should provide a from_string() method')", - "docstring": "Create and return a template for the given source code. 
This method is optional.", - "type": "method", - "file_path": "django\\django\\template\\backends\\base.py", - "ast_data": "FunctionDef name:from_string arguments arg:self arg:template_code Raise raises:NotImplementedError('subclasses of BaseEngine should provide a from_string() method')" - }, - { - "library": "matplotlib", - "name": "get_forced_alpha", - "source_code": "def get_forced_alpha(self): return self._forced_alpha", - "docstring": "Return whether the value given by get_alpha() should be used to override any other alpha-channel values.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\backend_bases.py", - "ast_data": "FunctionDef name:get_forced_alpha arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "simple_reduce", - "source_code": "def simple_reduce(self, rank): if self.collapsed is not None: return assert rank > 0 while len(self.cs) > rank: del self.cs[0] del self.ds[0]", - "docstring": "Reduce the rank of the matrix by dropping oldest vectors.", - "type": "method", - "file_path": "scipy\\scipy\\optimize\\_nonlin.py", - "ast_data": "FunctionDef name:simple_reduce arguments arg:self arg:rank If Compare op:IsNot Return return:no While Compare op:Gt" - }, - { - "library": "pytorch", - "name": "safe_expand", - "source_code": "@lru_cache(256) def safe_expand(r: _SympyT) -> _SympyT: if hasattr(r, 'expand'): try: return _fast_expand(r) except RecursionError: log.warning('RecursionError in _fast_expand(%s)', r) return r else: return r", - "docstring": "Expand the given symbolic expression by recursively rewriting product of sums into sum of products (with the product being either a multiplication or exponentiation). NOTE: using this on an intermediate expression may prevent simplification down the line, e.g., if we eagerly expand into , we won't be able to simplify as easily.", - "type": "function", - "file_path": "pytorch\\torch\\fx\\experimental\\symbolic_shapes.py", - "ast_data": "FunctionDef name:safe_expand arguments arg:r type:_SympyT Call call:lru_cache If Call call:hasattr Try Return return:yes ExceptHandler Return return:yes Return return:yes" - }, - { - "library": "pygame", - "name": "array2d", - "source_code": "def array2d(surface): bpp = surface.get_bytesize() try: dtype = (numpy.uint8, numpy.uint16, numpy.int32, numpy.int32)[bpp - 1] except IndexError: raise ValueError(f'unsupported bit depth {bpp * 8} for 2D array') size = surface.get_size() array = numpy.empty(size, dtype) surface_to_array(array, surface) return array", - "docstring": "pygame.surfarray.array2d(Surface): return array copy pixels into a 2d array Copy the pixels from a Surface into a 2D array. The bit depth of the surface will control the size of the integer values, and will work for any type of pixel format. 
This function will temporarily lock the Surface as pixels are copied (see the Surface.lock - lock the Surface memory for pixel access method).", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:array2d arguments arg:surface Assign Call call:get_bytesize Try Assign ExceptHandler Raise raises:ValueError(f'unsupported bit depth {bpp * 8} for 2D array') Assign Call call:get_size Assign Call call:empty Return return:yes" - }, - { - "library": "scipy", - "name": "iterate_structure", - "source_code": "def iterate_structure(structure, iterations, origin = None): structure = np.asarray(structure) if iterations < 2: return structure.copy() ni = iterations - 1 shape = [ii + ni * (ii - 1) for ii in structure.shape] pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))] slc = tuple((slice(pos[ii], pos[ii] + structure.shape[ii], None) for ii in range(len(shape)))) out = np.zeros(shape, bool) out[slc] = structure ! = 0 out = binary_dilation(out, structure, iterations = ni) if origin is None: return out else: origin = _ni_support._normalize_sequence(origin, structure.ndim) origin = [iterations * o for o in origin] return (out, origin)", - "docstring": "Iterate a structure by dilating it with itself. Parameters ---------- structure : array_like Structuring element (an array of bools, for example), to be dilated with itself. iterations : int number of dilations performed on the structure with itself origin : optional If origin is None, only the iterated structure is returned. If not, a tuple of the iterated structure and the modified origin is returned. Returns ------- iterate_structure : ndarray of bools A new structuring element obtained by dilating ( - 1) times with itself. See Also -------- generate_binary_structure Examples -------- >>> from scipy import ndimage >>> struct = ndimage.generate_binary_structure(2, 1) >>> struct.astype(int) array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) >>> ndimage.iterate_structure(struct, 2).astype(int) array([[0, 0, 1, 0, 0], [0, 1, 1, 1, 0], [1, 1, 1, 1, 1], [0, 1, 1, 1, 0], [0, 0, 1, 0, 0]]) >>> ndimage.iterate_structure(struct, 3).astype(int) array([[0, 0, 0, 1, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 1, 1, 0], [1, 1, 1, 1, 1, 1, 1], [0, 1, 1, 1, 1, 1, 0], [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 1, 0, 0, 0]])", - "type": "function", - "file_path": "scipy\\scipy\\ndimage\\_morphology.py", - "ast_data": "FunctionDef name:iterate_structure arguments arg:structure arg:iterations arg:origin Assign Call call:asarray If Compare op:Lt Return return:yes Assign Assign Assign Assign Call call:tuple Assign Call call:zeros Assign Compare op:NotEq Assign Call call:binary_dilation If Compare op:Is Return return:yes Assign Call call:_normalize_sequence Assign Return return:yes" - }, - { - "library": "tensorflow", - "name": "get_timestamped_export_dir", - "source_code": "def get_timestamped_export_dir(export_dir_base): attempts = 0 while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS: timestamp = int(time.time()) result_dir = os.path.join(compat.as_bytes(export_dir_base), compat.as_bytes(str(timestamp))) if not gfile.Exists(result_dir): return result_dir time.sleep(1) attempts + = 1 logging.warning('Directory {} already exists; retrying (attempt {}/{})'.format(compat.as_str(result_dir), attempts, MAX_DIRECTORY_CREATION_ATTEMPTS)) raise RuntimeError('Failed to obtain a unique export directory name after {} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))", - "docstring": "Builds a path to a new subdirectory within the base 
directory. Each export is written into a new subdirectory named using the current time. This guarantees monotonically increasing version numbers even across multiple runs of the pipeline. The timestamp used is the number of seconds since epoch UTC. Args: export_dir_base: A string containing a directory to write the exported graph and checkpoints. Returns: The full path of the new subdirectory (which is not actually created yet). Raises: RuntimeError: if repeated attempts fail to obtain a unique timestamped directory name.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\utils_v1\\export_utils.py", - "ast_data": "FunctionDef name:get_timestamped_export_dir arguments arg:export_dir_base Assign While Compare op:Lt Assign Call call:int Assign Call call:join If Return return:yes Raise raises:RuntimeError('Failed to obtain a unique export directory name after {} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))" - }, - { - "library": "matplotlib", - "name": "get_outer_bbox", - "source_code": "def get_outer_bbox(self, rows = 0, cols = 0): rows = np.atleast_1d(rows) cols = np.atleast_1d(cols) bbox = Bbox.from_extents(self.lefts[cols[0]].value(), self.bottoms[rows[-1]].value(), self.rights[cols[-1]].value(), self.tops[rows[0]].value()) return bbox", - "docstring": "Return the outer bounding box of the subplot specs given by rows and cols. rows and cols can be spans.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\_layoutgrid.py", - "ast_data": "FunctionDef name:get_outer_bbox arguments arg:self arg:rows arg:cols Assign Call call:atleast_1d Assign Call call:atleast_1d Assign Call call:from_extents Return return:yes" - }, - { - "library": "pytorch", - "name": "MapperMapDataPipe", - "source_code": "@functional_datapipe('map') class MapperMapDataPipe(MapDataPipe[_T_co]): datapipe: MapDataPipe fn: Callable def __init__(self, datapipe: MapDataPipe, fn: Callable = default_fn) -> None: super().__init__() self.datapipe = datapipe _check_unpickable_fn(fn) self.fn = fn def __len__(self) -> int: return len(self.datapipe) def __getitem__(self, index) -> _T_co: return self.fn(self.datapipe[index])", - "docstring": "Apply the input function over each item from the source DataPipe (functional name: ``). The function can be any regular Python function or partial object. Lambda function is not recommended as it is not supported by pickle. Args: datapipe: Source MapDataPipe fn: Function being applied to each item Example: >>> # xdoctest: +SKIP >>> from torchdata.datapipes.map import SequenceWrapper, Mapper >>> def add_one(x): ... 
return x + 1 >>> dp = SequenceWrapper(range(10)) >>> map_dp_1 = dp.map(add_one) >>> list(map_dp_1) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] >>> map_dp_2 = Mapper(dp, lambda x: x + 1) >>> list(map_dp_2) [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]", - "type": "class", - "file_path": "pytorch\\torch\\utils\\data\\datapipes\\map\\callable.py", - "ast_data": "ClassDef name:MapperMapDataPipe Call call:functional_datapipe FunctionDef name:__init__ arguments arg:self arg:datapipe type:MapDataPipe arg:fn type:Callable Assign Assign FunctionDef name:__len__ arguments arg:self Return return:yes FunctionDef name:__getitem__ arguments arg:self arg:index Return return:yes" - }, - { - "library": "tensorflow", - "name": "DatasetInitializer", - "source_code": "@tf_export('data.experimental.DatasetInitializer') class DatasetInitializer(lookup_ops.TableInitializerBase): def __init__(self, dataset): self.dataset = dataset elem_spec = self.dataset.element_spec _check_table_initializer_element_spec(elem_spec) key_type = elem_spec[0].dtype value_type = elem_spec[1].dtype super(DatasetInitializer, self).__init__(key_type, value_type) def initialize(self, table): lookup_ops.check_table_dtypes(table, self._key_dtype, self._value_dtype) init_op = ged_ops.initialize_table_from_dataset(table.resource_handle, self.dataset._variant_tensor) ops.add_to_collection(ops.GraphKeys.TABLE_INITIALIZERS, init_op) return init_op", - "docstring": "Creates a table initializer from a . Sample usage: >>> keys = tf.data.Dataset.range(100) >>> values = tf.data.Dataset.range(100).map( ... lambda x: tf.strings.as_string(x * 2)) >>> ds = tf.data.Dataset.zip((keys, values)) >>> init = tf.data.experimental.DatasetInitializer(ds) >>> table = tf.lookup.StaticHashTable(init, \"\") >>> table.lookup(tf.constant([0, 1, 2], dtype=tf.int64)).numpy() array([b'0', b'2', b'4'], dtype=object) Attributes: dataset: A object that produces tuples of scalars. The first scalar is treated as a key and the second as value. Raises: ValueError if doesn't conform to specifications.", - "type": "class", - "file_path": "tensorflow\\tensorflow\\python\\data\\experimental\\ops\\lookup_ops.py", - "ast_data": "ClassDef name:DatasetInitializer Call call:tf_export FunctionDef name:__init__ arguments arg:self arg:dataset Assign Assign Assign Assign FunctionDef name:initialize arguments arg:self arg:table Assign Call call:initialize_table_from_dataset Return return:yes" - }, - { - "library": "kornia", - "name": "KORNIA_CHECK_LAF", - "source_code": "def KORNIA_CHECK_LAF(laf: Tensor, raises: bool = True) -> bool: return KORNIA_CHECK_SHAPE(laf, ['B', 'N', '2', '3'], raises)", - "docstring": "Check whether a Local Affine Frame (laf) has a valid shape. Args: laf: local affine frame tensor to evaluate. raises: bool indicating whether an exception should be raised upon failure. Raises: Exception: if the input laf does not have a shape :math: and raises is True. Example: >>> lafs = torch.rand(2, 10, 2, 3) >>> KORNIA_CHECK_LAF(lafs) True", - "type": "function", - "file_path": "kornia\\kornia\\core\\check.py", - "ast_data": "FunctionDef name:KORNIA_CHECK_LAF arguments arg:laf type:Tensor arg:raises type:bool Return return:yes" - }, - { - "library": "pytorch", - "name": "no_sync", - "source_code": "@contextmanager def no_sync(self) -> Generator: _lazy_init(self, self) if not self._is_root: raise RuntimeError('`no_sync()` on inner FSDP instances is not supported. 
Please call `no_sync()` on root FSDP module.') self._assert_state(TrainingState.IDLE) old_flags = [] for m in self.modules(): if isinstance(m, FullyShardedDataParallel): old_flags.append((m, m._sync_gradients)) m._sync_gradients = False try: yield finally: for m, old_flag in old_flags: assert not m._sync_gradients, '`_sync_gradients` was incorrectly set to `True` while in the `no_sync()` context manager' m._sync_gradients = old_flag", - "docstring": "Disable gradient synchronizations across FSDP instances. Within this context, gradients will be accumulated in module variables, which will later be synchronized in the first forward-backward pass after exiting the context. This should only be used on the root FSDP instance and will recursively apply to all children FSDP instances. .. note:: This likely results in higher memory usage because FSDP will accumulate the full model gradients (instead of gradient shards) until the eventual sync. .. note:: When used with CPU offloading, the gradients will not be offloaded to CPU when inside the context manager. Instead, they will only be offloaded right after the eventual sync.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\fsdp\\fully_sharded_data_parallel.py", - "ast_data": "FunctionDef name:no_sync arguments arg:self If Raise raises:RuntimeError('`no_sync()` on inner FSDP instances is not supported. Please call `no_sync()` on root FSDP module.') Assign For Call call:modules If Call call:isinstance Assign Try For Assign" - }, - { - "library": "pytorch", - "name": "invalid_unique_memory_format", - "source_code": "def invalid_unique_memory_format(tensor, valid_memory_formats): n_legality = 0 for memory_format in valid_memory_formats: if tensor.is_contiguous(memory_format = memory_format): n_legality + = 1 return n_legality ! = 1", - "docstring": "Returns True if the tensor cannot be uniquely mapped to any of the given memory formats, False otherwise.", - "type": "method", - "file_path": "pytorch\\torch\\_tensor.py", - "ast_data": "FunctionDef name:invalid_unique_memory_format arguments arg:tensor arg:valid_memory_formats Assign For If Call call:is_contiguous Return return:yes" - }, - { - "library": "django", - "name": "related_objects", - "source_code": "@cached_property def related_objects(self): all_related_fields = self._get_fields(forward = False, reverse = True, include_hidden = True) return make_immutable_fields_list('related_objects', (obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many))", - "docstring": "Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. 
Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.", - "type": "method", - "file_path": "django\\django\\db\\models\\options.py", - "ast_data": "FunctionDef name:related_objects arguments arg:self Assign Call call:_get_fields Return return:yes" - }, - { - "library": "pytorch", - "name": "reduce", - "source_code": "@_exception_logger def reduce(tensor: torch.Tensor, dst: Optional[int] = None, op = ReduceOp.SUM, group: Optional[ProcessGroup] = None, async_op: bool = False, group_dst: Optional[int] = None): group = _group_or_default_group(group) group_dst = _canonicalize_group_rank(group, dst, group_dst, return_global = False) _check_single_tensor(tensor, 'tensor') if _rank_not_in_group(group): _warn_not_in_group('reduce') return opts = ReduceOptions() opts.reduceOp = op opts.rootRank = group_dst opts.asyncOp = async_op work = group.reduce([tensor], opts) if async_op: return work elif work is not None: work.wait()", - "docstring": "Reduces the tensor data across all machines. Only the process with rank `` but not both. Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:reduce arguments arg:tensor type:torch.Tensor arg:dst type:Optional[int] arg:op arg:group type:Optional[ProcessGroup] arg:async_op type:bool arg:group_dst type:Optional[int] Assign Call call:_group_or_default_group Assign Call call:_canonicalize_group_rank If Call call:_rank_not_in_group Return return:no Assign Call call:ReduceOptions Assign Assign Assign Assign Call call:reduce If Return return:yes If Compare op:IsNot" - }, - { - "library": "tensorflow", - "name": "on_batch_end", - "source_code": "def on_batch_end(self, batch, logs = None): logs = logs or {} self._samples_seen + = logs.get('size', 1) samples_seen_since = self._samples_seen - self._samples_seen_at_last_write if self.update_freq ! = 'epoch' and samples_seen_since > = self.update_freq: batch_logs = {'batch_' + k: v for k, v in logs.items() if k not in ['batch', 'size', 'num_steps']} self._write_custom_summaries(self._total_batches_seen, batch_logs) self._samples_seen_at_last_write = self._samples_seen self._total_batches_seen + = 1 self._stop_profiler()", - "docstring": "Writes scalar summaries for metrics on every training batch. Performs profiling if current batch is in profiler_batches.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\keras\\callbacks_v1.py", - "ast_data": "FunctionDef name:on_batch_end arguments arg:self arg:batch arg:logs Assign BoolOp Assign If BoolOp Compare op:NotEq Compare op:GtE Assign Assign" - }, - { - "library": "matplotlib", - "name": "get_verticalalignment", - "source_code": "def get_verticalalignment(self): return self._verticalalignment", - "docstring": "Return the vertical alignment as a string. 
Will be one of 'top', 'center', 'bottom', 'baseline' or 'center_baseline'.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\text.py", - "ast_data": "FunctionDef name:get_verticalalignment arguments arg:self Return return:yes" - }, - { - "library": "scipy", - "name": "hermite", - "source_code": "def hermite(n, monic = False): if n < 0: raise ValueError('n must be nonnegative.') if n = = 0: n1 = n + 1 else: n1 = n x, w = roots_hermite(n1) def wfunc(x): return exp(-x * x) if n = = 0: x, w = ([], []) hn = 2 ** n * _gam(n + 1) * sqrt(pi) kn = 2 ** n p = orthopoly1d(x, w, hn, kn, wfunc, (-inf, inf), monic, lambda x: _ufuncs.eval_hermite(n, x)) return p", - "docstring": "Physicist's Hermite polynomial. Defined by .. math:: H_n(x) = (-1)^ne^{x^2}\\frac{d^n}{dx^n}e^{-x^2}; :math: is a polynomial of degree :math:. Parameters ---------- n : int Degree of the polynomial. monic : bool, optional If , scale the leading coefficient to be 1. Default is . Returns ------- H : orthopoly1d Hermite polynomial. Notes ----- The polynomials :math: are orthogonal over :math: with weight function :math:. Examples -------- >>> from scipy import special >>> import matplotlib.pyplot as plt >>> import numpy as np >>> p_monic = special.hermite(3, monic=True) >>> p_monic poly1d([ 1. , 0. , -1.5, 0. ]) >>> p_monic(1) -0.49999999999999983 >>> x = np.linspace(-3, 3, 400) >>> y = p_monic(x) >>> plt.plot(x, y) >>> plt.title(\"Monic Hermite polynomial of degree 3\") >>> plt.xlabel(\"x\") >>> plt.ylabel(\"H_3(x)\") >>> plt.show()", - "type": "function", - "file_path": "scipy\\scipy\\special\\_orthogonal.py", - "ast_data": "FunctionDef name:hermite arguments arg:n arg:monic If Compare op:Lt Raise raises:ValueError('n must be nonnegative.') If Compare op:Eq Assign Assign Assign Call call:roots_hermite FunctionDef name:wfunc arguments arg:x Return return:yes If Compare op:Eq Assign Assign Assign Assign Call call:orthopoly1d Return return:yes" - }, - { - "library": "django", - "name": "create", - "source_code": "@classmethod def create(cls, children = None, connector = None, negated = False): obj = Node(children, connector or cls.default, negated) obj.__class__ = cls return obj", - "docstring": "Create a new instance using Node() instead of __init__() as some subclasses, e.g. django.db.models.query_utils.Q, may implement a custom __init__() with a signature that conflicts with the one defined in Node.__init__().", - "type": "method", - "file_path": "django\\django\\utils\\tree.py", - "ast_data": "FunctionDef name:create arguments arg:cls arg:children arg:connector arg:negated Assign Call call:Node Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "__init__", - "source_code": "def __init__(self, fig, pos, horizontal, vertical, aspect = None, anchor = 'C'): self._fig = fig self._pos = pos self._horizontal = horizontal self._vertical = vertical self._anchor = anchor self.set_anchor(anchor) self._aspect = aspect self._xrefindex = 0 self._yrefindex = 0 self._locator = None", - "docstring": "Parameters ---------- fig : Figure pos : tuple of 4 floats Position of the rectangle that will be divided. horizontal : list of :mod: Sizes for horizontal division. vertical : list of :mod: Sizes for vertical division. aspect : bool, optional Whether overall rectangular area is reduced so that the relative part of the horizontal and vertical scales have the same scale. 
anchor : (float, float) or {'C', 'SW', 'S', 'SE', 'E', 'NE', 'N', 'NW', 'W'}, default: 'C' Placement of the reduced rectangle, when *aspect* is True.", - "type": "method", - "file_path": "matplotlib\\lib\\mpl_toolkits\\axes_grid1\\axes_divider.py", - "ast_data": "FunctionDef name:__init__ arguments arg:self arg:fig arg:pos arg:horizontal arg:vertical arg:aspect arg:anchor Assign Assign Assign Assign Assign Assign Assign Assign Assign" - }, - { - "library": "numpy", - "name": "I", - "source_code": "@property def I(self): M, N = self.shape if M = = N: from numpy.linalg import inv as func else: from numpy.linalg import pinv as func return asmatrix(func(self))", - "docstring": "Returns the (multiplicative) inverse of invertible . Parameters ---------- None Returns ------- ret : matrix object If is non-singular, is such that `self` is singular. See Also -------- linalg.inv Examples -------- >>> m = np.matrix('[1, 2; 3, 4]'); m matrix([[1, 2], [3, 4]]) >>> m.getI() matrix([[-2. , 1. ], [ 1.5, -0.5]]) >>> m.getI() * m matrix([[ 1., 0.], # may vary [ 0., 1.]])", - "type": "method", - "file_path": "numpy\\numpy\\matrixlib\\defmatrix.py", - "ast_data": "FunctionDef name:I arguments arg:self Assign If Compare op:Eq Return return:yes" - }, - { - "library": "matplotlib", - "name": "fignum_exists", - "source_code": "def fignum_exists(num: int | str) -> bool: return _pylab_helpers.Gcf.has_fignum(num) if isinstance(num, int) else num in get_figlabels()", - "docstring": "Return whether the figure with the given id exists. Parameters ---------- num : int or str A figure identifier. Returns ------- bool Whether or not a figure with id *num* exists.", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", - "ast_data": "FunctionDef name:fignum_exists arguments arg:num type:int | str Return return:yes" - }, - { - "library": "scipy", - "name": "EggCrate", - "source_code": "class EggCrate(Benchmark): def __init__(self, dimensions = 2): Benchmark.__init__(self, dimensions) self._bounds = list(zip([-5.0] * self.N, [5.0] * self.N)) self.global_optimum = [[0.0, 0.0]] self.fglob = 0.0 def fun(self, x, *args): self.nfev + = 1 return x[0] ** 2 + x[1] ** 2 + 25 * (sin(x[0]) ** 2 + sin(x[1]) ** 2)", - "docstring": "Egg Crate objective function. This class defines the Egg Crate [1]_ global optimization problem. This is a multimodal minimization problem defined as follows: .. math:: f_{\\text{EggCrate}}(x) = x_1^2 + x_2^2 + 25 \\left[ \\sin^2(x_1) + \\sin^2(x_2) \\right] with :math: for :math:. *Global optimum*: :math: for :math: .. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions For Global Optimization Problems Int. Journal of Mathematical Modelling and Numerical Optimisation, 2013, 4, 150-194.", - "type": "class", - "file_path": "scipy\\benchmarks\\benchmarks\\go_benchmark_functions\\go_funcs_E.py", - "ast_data": "ClassDef name:EggCrate FunctionDef name:__init__ arguments arg:self arg:dimensions Assign Call call:list Assign Assign FunctionDef name:fun arguments arg:self arg:x vararg:args Return return:yes" - }, - { - "library": "pytorch", - "name": "init_from_local_shards", - "source_code": "def init_from_local_shards(local_shards: list[Shard], *global_size, process_group = None, init_rrefs = False) -> ShardedTensor: return ShardedTensor._init_from_local_shards(local_shards, *global_size, process_group = process_group, init_rrefs = init_rrefs)", - "docstring": "Creates an :class: from local shards and the global metadata. Needs to be called on all ranks in an SPMD fashion. 
Args: local_shards (List[:class ]): A list of shards that represent the local shards on this rank. global_size (int...): a list, tuple, or of integers defining the shape of the overall sharded tensor. Keyword args: process_group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. init_rrefs (bool, optional): Whether or not to initialize :class:s pointing to remote shards. Need to initialize the RPC Framework if specified as `ShardedTensor` object handle on this rank Examples: Suppose we want construct a sharded tensor on two ranks, global size = (10, 5), each shard have a (5, 5) local tensor, we can do it like below: on rank 0: >>> # xdoctest: +SKIP(\"not distributed\") >>> local_shard_metadata = ShardMetadata( >>> shard_offsets=[0, 0], >>> shard_lengths=[5, 5], >>> placement=\"rank:0/cuda:0\" >>> ) >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5]) on rank 1: >>> # xdoctest: +SKIP(\"not distributed\") >>> local_shard_metadata = ShardMetadata( >>> shard_offsets=[5, 0], >>> shard_lengths=[5, 5], >>> placement=\"rank:1/cuda:1\" >>> ) >>> local_shards = [Shard(torch.randn(5, 5), local_shard_metadata)] >>> sharded_tensor = init_from_local_shards(local_shards, [10, 5])", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\_shard\\sharded_tensor\\__init__.py", - "ast_data": "FunctionDef name:init_from_local_shards arguments arg:local_shards type:list[Shard] vararg:global_size Return return:yes" - }, - { - "library": "scipy", - "name": "fast_forward", - "source_code": "def fast_forward(self, n: IntNumber) -> 'QMCEngine': self.random(n = n) return self", - "docstring": "Fast-forward the sequence by positions. Parameters ---------- n : int Number of points to skip in the sequence. Returns ------- engine : QMCEngine Engine reset to its base state.", - "type": "method", - "file_path": "scipy\\scipy\\stats\\_qmc.py", - "ast_data": "FunctionDef name:fast_forward arguments arg:self arg:n type:IntNumber Return return:yes" - }, - { - "library": "matplotlib", - "name": "refine_triangulation", - "source_code": "def refine_triangulation(self, return_tri_index = False, subdiv = 3): refi_triangulation = self._triangulation ntri = refi_triangulation.triangles.shape[0] ancestors = np.arange(ntri, dtype = np.int32) for _ in range(subdiv): refi_triangulation, ancestors = self._refine_triangulation_once(refi_triangulation, ancestors) refi_npts = refi_triangulation.x.shape[0] refi_triangles = refi_triangulation.triangles if return_tri_index: found_index = np.full(refi_npts, -1, dtype = np.int32) tri_mask = self._triangulation.mask if tri_mask is None: found_index[refi_triangles] = np.repeat(ancestors, 3).reshape(-1, 3) else: ancestor_mask = tri_mask[ancestors] found_index[refi_triangles[ancestor_mask, :]] = np.repeat(ancestors[ancestor_mask], 3).reshape(-1, 3) found_index[refi_triangles[~ancestor_mask, :]] = np.repeat(ancestors[~ancestor_mask], 3).reshape(-1, 3) return (refi_triangulation, found_index) else: return refi_triangulation", - "docstring": "Compute a uniformly refined triangulation *refi_triangulation* of the encapsulated :attr:. This function refines the encapsulated triangulation by splitting each father triangle into 4 child sub-triangles built on the edges midside nodes, recursing *subdiv* times. In the end, each triangle is hence divided into `~matplotlib.tri.Triangulation` The refined triangulation. 
found_index : int array Index of the initial triangulation containing triangle, for each point of *refi_triangulation*. Returned only if *return_tri_index* is set to True.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\tri\\_trirefine.py", - "ast_data": "FunctionDef name:refine_triangulation arguments arg:self arg:return_tri_index arg:subdiv Assign Assign Assign Call call:arange For Call call:range Assign Call call:_refine_triangulation_once Assign Assign If Assign Call call:full Assign If Compare op:Is Assign Call call:reshape Assign Assign Call call:reshape Assign Call call:reshape Return return:yes Return return:yes" - }, - { - "library": "pytorch", - "name": "fuse_convtranspose_bn", - "source_code": "def fuse_convtranspose_bn(is_qat, convt, bn): assert convt.training = = bn.training, 'ConvTranspose and BN both must be in the same mode (train or eval).' if is_qat: raise Exception('Fusing ConvTranspose+BatchNorm not yet supported in QAT.') else: return nn.utils.fusion.fuse_conv_bn_eval(convt, bn, transpose = True)", - "docstring": "Return the fused ConvTranspose and bn modules. Given ConvTranspose and bn modules, fuses them and returns the fused module Args: convt: Module instance of type ConvTransposeNd bn: BatchNormNd instance that needs to be fused with the linear layer. batch norm N should match the ConvTranspose N Examples:: >>> m1 = nn.ConvTranspose2d(10, 20, 3) >>> b1 = nn.BatchNorm2d(20) >>> # xdoctest: +SKIP >>> m2 = fuse_convtranspose_bn(m1, b1)", - "type": "function", - "file_path": "pytorch\\torch\\ao\\quantization\\fuser_method_mappings.py", - "ast_data": "FunctionDef name:fuse_convtranspose_bn arguments arg:is_qat arg:convt arg:bn If Raise raises:Exception('Fusing ConvTranspose+BatchNorm not yet supported in QAT.') Return return:yes" - }, - { - "library": "pandas", - "name": "length_of_indexer", - "source_code": "def length_of_indexer(indexer, target = None) -> int: if target is not None and isinstance(indexer, slice): target_len = len(target) start = indexer.start stop = indexer.stop step = indexer.step if start is None: start = 0 elif start < 0: start + = target_len if stop is None or stop > target_len: stop = target_len elif stop < 0: stop + = target_len if step is None: step = 1 elif step < 0: start, stop = (stop + 1, start + 1) step = -step return (stop - start + step - 1) // step elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)): if isinstance(indexer, list): indexer = np.array(indexer) if indexer.dtype = = bool: return indexer.sum() return len(indexer) elif isinstance(indexer, range): return (indexer.stop - indexer.start) // indexer.step elif not is_list_like_indexer(indexer): return 1 raise AssertionError('cannot find the length of the indexer')", - "docstring": "Return the expected length of target[indexer] Returns ------- int", - "type": "function", - "file_path": "pandas\\pandas\\core\\indexers\\utils.py", - "ast_data": "FunctionDef name:length_of_indexer arguments arg:indexer arg:target If BoolOp Compare op:IsNot Call call:isinstance Assign Call call:len Assign Assign Assign If Compare op:Is Assign If Compare op:Lt If BoolOp Compare op:Is Compare op:Gt Assign If Compare op:Lt If Compare op:Is Assign If Compare op:Lt Assign Assign Return return:yes If Call call:isinstance If Call call:isinstance Assign Call call:array If Compare op:Eq Return return:yes Return return:yes If Call call:isinstance Return return:yes If Return return:yes Raise raises:AssertionError('cannot find the length of the indexer')" - }, - { - "library": 
"scikit-learn", - "name": "fit_transform", - "source_code": "def fit_transform(self, y): y = column_or_1d(y, warn = True) self.classes_, y = _unique(y, return_inverse = True) return y", - "docstring": "Fit label encoder and return encoded labels. Parameters ---------- y : array-like of shape (n_samples,) Target values. Returns ------- y : array-like of shape (n_samples,) Encoded labels.", - "type": "method", - "file_path": "scikit-learn\\sklearn\\preprocessing\\_label.py", - "ast_data": "FunctionDef name:fit_transform arguments arg:self arg:y Assign Call call:column_or_1d Assign Call call:_unique Return return:yes" - }, - { - "library": "seaborn", - "name": "define_bin_params", - "source_code": "def define_bin_params(self, x1, x2 = None, weights = None, cache = True): if x2 is None: bin_edges = self._define_bin_edges(x1, weights, self.bins, self.binwidth, self.binrange, self.discrete) if isinstance(self.bins, (str, Number)): n_bins = len(bin_edges) - 1 bin_range = (bin_edges.min(), bin_edges.max()) bin_kws = dict(bins = n_bins, range = bin_range) else: bin_kws = dict(bins = bin_edges) else: bin_edges = [] for i, x in enumerate([x1, x2]): bins = self.bins if not bins or isinstance(bins, (str, Number)): pass elif isinstance(bins[i], str): bins = bins[i] elif len(bins) = = 2: bins = bins[i] binwidth = self.binwidth if binwidth is None: pass elif not isinstance(binwidth, Number): binwidth = binwidth[i] binrange = self.binrange if binrange is None: pass elif not isinstance(binrange[0], Number): binrange = binrange[i] discrete = self.discrete if not isinstance(discrete, bool): discrete = discrete[i] bin_edges.append(self._define_bin_edges(x, weights, bins, binwidth, binrange, discrete)) bin_kws = dict(bins = tuple(bin_edges)) if cache: self.bin_kws = bin_kws return bin_kws", - "docstring": "Given data, return numpy.histogram parameters to define bins.", - "type": "method", - "file_path": "seaborn\\seaborn\\_statistics.py", - "ast_data": "FunctionDef name:define_bin_params arguments arg:self arg:x1 arg:x2 arg:weights arg:cache If Compare op:Is Assign Call call:_define_bin_edges If Call call:isinstance Assign Assign Assign Call call:dict Assign Call call:dict Assign For Call call:enumerate Assign If BoolOp Call call:isinstance If Call call:isinstance Assign If Compare op:Eq Assign Assign If Compare op:Is If Assign Assign If Compare op:Is If Assign Assign If Assign Assign Call call:dict If Assign Return return:yes" - }, - { - "library": "kornia", - "name": "inverse", - "source_code": "def inverse(self) -> Se3: r_inv = self.r.inverse() _t = -1 * self.t if isinstance(_t, int): raise TypeError('Unexpected integer from `-1 * translation`') return Se3(r_inv, r_inv * _t)", - "docstring": "Return the inverse transformation. 
Example: >>> s = Se3(So3.identity(), torch.ones(3)) >>> s_inv = s.inverse() >>> s_inv.r Parameter containing: tensor([1., -0., -0., -0.], requires_grad=True) >>> s_inv.t Parameter containing: tensor([-1., -1., -1.], requires_grad=True)", - "type": "method", - "file_path": "kornia\\kornia\\geometry\\liegroup\\se3.py", - "ast_data": "FunctionDef name:inverse arguments arg:self Assign Call call:inverse Assign If Call call:isinstance Raise raises:TypeError('Unexpected integer from `-1 * translation`') Return return:yes" - }, - { - "library": "pytorch", - "name": "zero_step", - "source_code": "def zero_step(fut: torch.futures.Future) -> torch.Tensor: overlap_info = zero._overlap_info bucket_index = bucket.index() rank = zero.global_rank assigned_ranks = overlap_info.assigned_ranks_per_bucket[bucket_index] overlap_info.bucket_indices_seen.append(bucket_index) if rank in assigned_ranks: _perform_local_step(bucket, zero, rank) _broadcast_bucket(bucket_index, zero) num_buckets = len(overlap_info.params_per_bucket) if len(overlap_info.bucket_indices_seen) = = num_buckets: overlap_info.wait_for_broadcasts() overlap_info.clear_per_iter_info() return bucket.buffer()", - "docstring": "Perform partial :class: :meth: using gradients in the :class:. Returns: A :class: representing the contents of the gradient bucket.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\algorithms\\ddp_comm_hooks\\ddp_zero_hook.py", - "ast_data": "FunctionDef name:zero_step arguments arg:fut type:torch.futures.Future Assign Assign Call call:index Assign Assign If Compare op:In Assign Call call:len If Compare op:Eq Return return:yes" - }, - { - "library": "kornia", - "name": "VerticalFlip", - "source_code": "class VerticalFlip(OperationBase): def __init__(self, initial_probability: float = 0.5, temperature: float = 0.1) -> None: super().__init__(K.RandomVerticalFlip(same_on_batch = False, p = initial_probability), initial_magnitude = None, temperature = temperature, symmetric_megnitude = False)", - "docstring": "Apply vertical flip operation. Args: initial_magnitude: the initial magnitude. temperature: temperature for RelaxedBernoulli distribution used during training.", - "type": "class", - "file_path": "kornia\\kornia\\augmentation\\auto\\operations\\ops.py", - "ast_data": "ClassDef name:VerticalFlip FunctionDef name:__init__ arguments arg:self arg:initial_probability type:float arg:temperature type:float" - }, - { - "library": "django", - "name": "pre_sql_setup", - "source_code": "def pre_sql_setup(self, with_col_aliases = False): self.setup_query(with_col_aliases = with_col_aliases) order_by = self.get_order_by() self.where, self.having, self.qualify = self.query.where.split_having_qualify(must_group_by = self.query.group_by is not None) extra_select = self.get_extra_select(order_by, self.select) self.has_extra_select = bool(extra_select) group_by = self.get_group_by(self.select + extra_select, order_by) return (extra_select, order_by, group_by)", - "docstring": "Do any necessary class setup immediately prior to producing SQL. 
This is for things that can't necessarily be done in __init__ because we might not have all the pieces in place at that time.", - "type": "method", - "file_path": "django\\django\\db\\models\\sql\\compiler.py", - "ast_data": "FunctionDef name:pre_sql_setup arguments arg:self arg:with_col_aliases Assign Call call:get_order_by Assign Call call:split_having_qualify Assign Call call:get_extra_select Assign Call call:bool Assign Call call:get_group_by Return return:yes" - }, - { - "library": "pytorch", - "name": "barrier", - "source_code": "@_exception_logger def barrier(group: Optional[ProcessGroup] = GroupMember.WORLD, async_op = False, device_ids = None): group = group or _get_default_group() if _rank_not_in_group(group): _warn_not_in_group('barrier') return opts = BarrierOptions() opts.asyncOp = async_op device = torch._C._get_accelerator() if isinstance(device_ids, list): opts.device_ids = device_ids opts.device = torch.device(device.type, device_ids[0]) elif getattr(group, 'bound_device_id', None) is not None: opts.device = group.bound_device_id elif device.type = = 'cpu' or _get_object_coll_device(group) = = 'cpu': opts.device = torch.device('cpu') else: opts.device = device warnings.warn('No device id is provided via `init_process_group` or `barrier `. Using the current device set by the user. ') work = group.barrier(opts = opts) if async_op: return work elif work is not None: work.wait()", - "docstring": "Synchronize all processes. This collective blocks processes until the whole group enters this function, if async_op is False, or if async work handle is called on wait(). Args: group (ProcessGroup, optional): The process group to work on. If None, the default process group will be used. async_op (bool, optional): Whether this op should be an async op device_ids ([int], optional): List of device/GPU ids. Only one id is expected. Returns: Async work handle, if async_op is set to True. None, if not async_op or if not part of the group .. note:: now blocks the cpu thread till the completion of the barrier collective.", - "type": "function", - "file_path": "pytorch\\torch\\distributed\\distributed_c10d.py", - "ast_data": "FunctionDef name:barrier arguments arg:group type:Optional[ProcessGroup] arg:async_op arg:device_ids Assign BoolOp Call call:_get_default_group If Call call:_rank_not_in_group Return return:no Assign Call call:BarrierOptions Assign Assign Call call:_get_accelerator If Call call:isinstance Assign Assign Call call:device If Compare op:IsNot Assign If BoolOp Compare op:Eq Compare op:Eq Assign Call call:device Assign Assign Call call:barrier If Return return:yes If Compare op:IsNot" - }, - { - "library": "pytorch", - "name": "__new__", - "source_code": "@staticmethod @torch._disable_dynamo def __new__(cls, local_tensor: torch.Tensor, spec: DTensorSpec, *, requires_grad: bool) -> 'DTensor': if local_tensor.requires_grad and (not requires_grad): warnings.warn(\"To construct DTensor from torch.Tensor, it's recommended to use local_tensor.detach() and make requires_grad consistent.\") assert spec.tensor_meta is not None, 'TensorMeta should not be None!' r = torch.Tensor._make_wrapper_subclass(cls, spec.tensor_meta.shape, strides = spec.tensor_meta.stride, dtype = local_tensor.dtype, device = local_tensor.device, layout = local_tensor.layout, requires_grad = requires_grad) r._spec = spec r._local_tensor = local_tensor return r", - "docstring": "Construct a DTensor from a local tensor, device mesh, and placement and other tensor properties (i.e. shape, requires_grad, strides, etc). 
.. note:: This is not a public API and it's only supposed to be used by the operator implementations and internals. If you want to construct a DTensor from a local tensor, consider using ``.", - "type": "method", - "file_path": "pytorch\\torch\\distributed\\tensor\\_api.py", - "ast_data": "FunctionDef name:__new__ arguments arg:cls arg:local_tensor type:torch.Tensor arg:spec type:DTensorSpec If BoolOp Assign Call call:_make_wrapper_subclass Assign Assign Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_autoscale_on", - "source_code": "def get_autoscale_on(self): return all((axis._get_autoscale_on() for axis in self._axis_map.values()))", - "docstring": "Return True if each axis is autoscaled, False otherwise.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\axes\\_base.py", - "ast_data": "FunctionDef name:get_autoscale_on arguments arg:self Return return:yes" - }, - { - "library": "matplotlib", - "name": "get_trifinder", - "source_code": "def get_trifinder(self): if self._trifinder is None: from matplotlib.tri._trifinder import TrapezoidMapTriFinder self._trifinder = TrapezoidMapTriFinder(self) return self._trifinder", - "docstring": "Return the default of this triangulation, creating it if necessary. This allows the same TriFinder object to be easily shared.", - "type": "method", - "file_path": "matplotlib\\lib\\matplotlib\\tri\\_triangulation.py", - "ast_data": "FunctionDef name:get_trifinder arguments arg:self If Compare op:Is Assign Call call:TrapezoidMapTriFinder Return return:yes" - }, - { - "library": "django", - "name": "to_python", - "source_code": "def to_python(self, value): value = super(IntegerField, self).to_python(value) if value in self.empty_values: return None if self.localize: value = formats.sanitize_separators(value) try: value = float(value) except (ValueError, TypeError): raise ValidationError(self.error_messages['invalid'], code = 'invalid') return value", - "docstring": "Validate that float() can be called on the input. Return the result of float() or None for empty values.", - "type": "method", - "file_path": "django\\django\\forms\\fields.py", - "ast_data": "FunctionDef name:to_python arguments arg:self arg:value Assign Call call:to_python If Compare op:In Return return:yes If Assign Call call:sanitize_separators Try Assign Call call:float ExceptHandler Raise raises:ValidationError(self.error_messages['invalid'], code='invalid') Return return:yes" - }, - { - "library": "matplotlib", - "name": "subplot_tool", - "source_code": "def subplot_tool(targetfig: Figure | None = None) -> SubplotTool | None: if targetfig is None: targetfig = gcf() tb = targetfig.canvas.manager.toolbar if hasattr(tb, 'configure_subplots'): from matplotlib.backend_bases import NavigationToolbar2 return cast(NavigationToolbar2, tb).configure_subplots() elif hasattr(tb, 'trigger_tool'): from matplotlib.backend_bases import ToolContainerBase cast(ToolContainerBase, tb).trigger_tool('subplots') return None else: raise ValueError('subplot_tool can only be launched for figures with an associated toolbar')", - "docstring": "Launch a subplot tool window for a figure. 
Returns -------", - "type": "function", - "file_path": "matplotlib\\lib\\matplotlib\\pyplot.py", - "ast_data": "FunctionDef name:subplot_tool arguments arg:targetfig type:Figure | None If Compare op:Is Assign Call call:gcf Assign If Call call:hasattr Return return:yes If Call call:hasattr Return return:yes Raise raises:ValueError('subplot_tool can only be launched for figures with an associated toolbar')" - }, - { - "library": "pytorch", - "name": "get_rng_state", - "source_code": "def get_rng_state(device: Union[int, str, torch.device] = 'cuda') -> Tensor: _lazy_init() if isinstance(device, str): device = torch.device(device) elif isinstance(device, int): device = torch.device('cuda', device) idx = device.index if idx is None: idx = current_device() default_generator = torch.cuda.default_generators[idx] return default_generator.get_state()", - "docstring": "Return the random number generator state of the specified GPU as a ByteTensor. Args: device (torch.device or int, optional): The device to return the RNG state of. Default: ``, the current CUDA device). .. warning:: This function eagerly initializes CUDA.", - "type": "function", - "file_path": "pytorch\\torch\\cuda\\random.py", - "ast_data": "FunctionDef name:get_rng_state arguments arg:device type:Union[int, str, torch.device] If Call call:isinstance Assign Call call:device If Call call:isinstance Assign Call call:device Assign If Compare op:Is Assign Call call:current_device Assign Return return:yes" - }, - { - "library": "pygame", - "name": "sprites", - "source_code": "def sprites(self): return self._spritelist.copy()", - "docstring": "return a ordered list of sprites (first back, last top). LayeredUpdates.sprites(): return sprites", - "type": "method", - "file_path": "pygame\\src_py\\sprite.py", - "ast_data": "FunctionDef name:sprites arguments arg:self Return return:yes" - }, - { - "library": "tensorflow", - "name": "add_tensor_filter", - "source_code": "def add_tensor_filter(self, filter_name, filter_callable): if not isinstance(filter_name, str): raise TypeError('Input argument filter_name is expected to be str, but is not.') if not filter_name: raise ValueError('Input argument filter_name cannot be empty.') if not callable(filter_callable): raise TypeError('Input argument filter_callable is expected to be callable, but is not.') self._tensor_filters[filter_name] = filter_callable", - "docstring": "Add a tensor filter. A tensor filter is a named callable of the signature: filter_callable(dump_datum, tensor), wherein dump_datum is an instance of debug_data.DebugTensorDatum carrying metadata about the dumped tensor, including tensor name, timestamps, etc. tensor is the value of the dumped tensor as an numpy.ndarray object. The return value of the function is a bool. This is the same signature as the input argument to debug_data.DebugDumpDir.find(). Args: filter_name: (str) name of the filter. Cannot be empty. filter_callable: (callable) a filter function of the signature described as above. Raises: ValueError: If filter_name is an empty str. TypeError: If filter_name is not a str. 
Or if filter_callable is not callable.", - "type": "method", - "file_path": "tensorflow\\tensorflow\\python\\debug\\cli\\analyzer_cli.py", - "ast_data": "FunctionDef name:add_tensor_filter arguments arg:self arg:filter_name arg:filter_callable If Raise raises:TypeError('Input argument filter_name is expected to be str, but is not.') If Raise raises:ValueError('Input argument filter_name cannot be empty.') If Raise raises:TypeError('Input argument filter_callable is expected to be callable, but is not.') Assign" - }, - { - "library": "pygame", - "name": "map_array", - "source_code": "def map_array(surface, array): if array.ndim = = 0: raise ValueError('array must have at least 1 dimension') shape = array.shape if shape[-1] ! = 3: raise ValueError('array must be a 3d array of 3-value color data') target = numpy_empty(shape[: -1], numpy.int32) pix_map_array(target, array, surface) return target", - "docstring": "pygame.surfarray.map_array(Surface, array3d): return array2d map a 3d array into a 2d array Convert a 3D array into a 2D array. This will use the given Surface format to control the conversion. Note: arrays do not need to be 3D, as long as the minor axis has three elements giving the component colours, any array shape can be used (for example, a single colour can be mapped, or an array of colours). The array shape is limited to eleven dimensions maximum, including the three element minor axis.", - "type": "function", - "file_path": "pygame\\src_py\\surfarray.py", - "ast_data": "FunctionDef name:map_array arguments arg:surface arg:array If Compare op:Eq Raise raises:ValueError('array must have at least 1 dimension') Assign If Compare op:NotEq Raise raises:ValueError('array must be a 3d array of 3-value color data') Assign Call call:numpy_empty Return return:yes" - }, - { - "library": "kornia", - "name": "ConvQuadInterp3d", - "source_code": "class ConvQuadInterp3d(Module): def __init__(self, strict_maxima_bonus: float = 10.0, eps: float = 1e-07) -> None: super().__init__() self.strict_maxima_bonus = strict_maxima_bonus self.eps = eps def __repr__(self) -> str: return f'{self.__class__.__name__}(strict_maxima_bonus = {self.strict_maxima_bonus})' def forward(self, x: Tensor) -> tuple[Tensor, Tensor]: return conv_quad_interp3d(x, self.strict_maxima_bonus, self.eps)", - "docstring": "Calculate soft argmax 3d per window. See :func: for details.", - "type": "class", - "file_path": "kornia\\kornia\\geometry\\subpix\\spatial_soft_argmax.py", - "ast_data": "ClassDef name:ConvQuadInterp3d FunctionDef name:__init__ arguments arg:self arg:strict_maxima_bonus type:float arg:eps type:float Assign Assign FunctionDef name:__repr__ arguments arg:self Return return:yes FunctionDef name:forward arguments arg:self arg:x type:Tensor Return return:yes" - }, - { - "library": "tensorflow", - "name": "convert_gru_weights", - "source_code": "def convert_gru_weights(weights, from_cudnn = True): kernels = transform_kernels(weights[0], transpose_input(from_cudnn), n_gates) recurrent_kernels = transform_kernels(weights[1], lambda k: k.T, n_gates) biases = np.array(weights[2]).reshape((2, -1) if from_cudnn else -1) return [kernels, recurrent_kernels, biases]", - "docstring": "Converts the weights between CuDNNGRU and GRU. Args: weights: Original weights. from_cudnn: Indicates whether original weights are from CuDNN layer. 
Returns: Updated weights compatible with GRU.", - "type": "function", - "file_path": "tensorflow\\tensorflow\\python\\keras\\saving\\hdf5_format.py", - "ast_data": "FunctionDef name:convert_gru_weights arguments arg:weights arg:from_cudnn Assign Call call:transform_kernels Assign Call call:transform_kernels Assign Call call:reshape Return return:yes" - } -] \ No newline at end of file