Fix docstrings with last version of hf-doc-builder styler (#18581)
* Fix docstrings with last version of hf-doc-builder styler
* Remove empty Parameter block
parent 42b8940b34
commit c23cbdff4c
@@ -79,7 +79,6 @@ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: b
     measurements it is important that the function is executed in a separate process

     Args:
-
     - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
     - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
     """
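For context, the pattern this wrapper implements looks roughly like the sketch below: the function runs in a child process and ships its result back over a queue, so its allocations never touch the parent's memory accounting. The helper names (`run_in_separate_process`, `heavy_computation`) are illustrative, not the library's, and the sketch assumes the `fork` start method (the Linux default), since nested functions do not pickle under `spawn`.

from multiprocessing import Process, Queue

def run_in_separate_process(func):
    # Illustrative stand-in for the wrapper above: execute `func` in a
    # child process and hand its result back to the caller.
    def wrapped():
        def target(queue):
            queue.put(func())  # send the result back to the parent
        queue = Queue()
        process = Process(target=target, args=(queue,))
        process.start()
        result = queue.get()
        process.join()
        return result
    return wrapped

def heavy_computation():
    return sum(range(10**6))

if __name__ == "__main__":
    print(run_in_separate_process(heavy_computation)())  # 499999500000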
@@ -210,7 +209,6 @@ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_i
     https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239

     Args:
-
     - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure
         the peak memory

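A minimal call sketch for the function this hunk documents; the import path is where the file lives in this commit, so treat it as an assumption if the module has since moved, as is the returned-bytes interpretation.

from transformers.benchmark.benchmark_utils import measure_peak_memory_cpu

def allocate():
    _ = [0] * (10**7)  # hold ~80 MB of pointers briefly to create a peak

peak_bytes = measure_peak_memory_cpu(allocate, interval=0.1)
print(f"peak CPU memory: {peak_bytes / 2**20:.1f} MiB")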
@@ -228,7 +226,6 @@ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_i
         measures current cpu memory usage of a given `process_id`

         Args:
-
         - `process_id`: (`int`) process_id for which to measure memory

         Returns
@@ -336,7 +333,6 @@ def start_memory_tracing(
     https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info

     Args:
-
     - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list
         of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
         'transformers.models.gpt2.modeling_gpt2')
@@ -483,7 +479,6 @@ def stop_memory_tracing(
     Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.

     Args:
-
     `memory_trace` (optional output of start_memory_tracing, default: None):
         memory trace to convert in summary
     `ignore_released_memory` (boolean, default: None):
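Taken together, the two tracing helpers above pair like this. A rough sketch: the `summary.total` attribute follows the `MemorySummary` structure described elsewhere in the same module and should be treated as an assumption.

from transformers.benchmark.benchmark_utils import (
    start_memory_tracing,
    stop_memory_tracing,
)

trace = start_memory_tracing("transformers")  # record only events from this module
# ... run the code whose line-by-line memory you want to profile ...
summary = stop_memory_tracing(trace, ignore_released_memory=True)
print(summary.total)  # cumulative memory increase over the traced region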
@@ -208,7 +208,6 @@ class FlaxGenerationMixin:
        post](https://huggingface.co/blog/how-to-generate).

        Parameters:
-
            input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            max_length (`int`, *optional*, defaults to `model.config.max_length`):
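A minimal `generate` call against the Flax parameters documented above; the gpt2 checkpoint and the prompt are arbitrary examples, not anything this diff prescribes.

from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, my dog is", return_tensors="np")
# input_ids has shape (batch_size, sequence_length), as the docstring says
outputs = model.generate(inputs.input_ids, max_length=20)
print(tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True))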
@@ -418,7 +418,6 @@ class TFGenerationMixin:
        post](https://huggingface.co/blog/how-to-generate).

        Parameters:
-
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, `(batch_size, sequence_length,
            feature_dim)` or `(batch_size, num_channels, height, width)`, *optional*):
                The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
@@ -1336,7 +1335,6 @@ class TFGenerationMixin:
        post](https://huggingface.co/blog/how-to-generate).

        Parameters:
-
            input_ids (`tf.Tensor` of `dtype=tf.int32` and shape `(batch_size, sequence_length)`, *optional*):
                The sequence used as a prompt for the generation. If `None` the method initializes it with
                `bos_token_id` and a batch size of 1.
@@ -2070,7 +2068,6 @@ class TFGenerationMixin:
        Generates sequences for models with a language modeling head using greedy decoding.

        Parameters:
-
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2323,7 +2320,6 @@ class TFGenerationMixin:
        Generates sequences for models with a language modeling head using multinomial sampling.

        Parameters:
-
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2600,7 +2596,6 @@ class TFGenerationMixin:
        Generates sequences for models with a language modeling head using beam search with multinomial sampling.

        Parameters:
-
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            max_length (`int`, *optional*, defaults to 20):
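The greedy, sampling, and beam-search methods touched in the TF hunks above are what `generate()` dispatches to; a sketch of driving them through the public entry point (checkpoint and flag values are arbitrary examples):

from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFAutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, my dog is", return_tensors="tf")
greedy = model.generate(inputs.input_ids, max_length=20)                   # greedy decoding
sampled = model.generate(inputs.input_ids, max_length=20, do_sample=True)  # multinomial sampling
beams = model.generate(inputs.input_ids, max_length=20, num_beams=4)       # beam search
print(tokenizer.batch_decode(greedy, skip_special_tokens=True))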
@@ -1555,7 +1555,6 @@ class GenerationMixin:
        used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.

        Parameters:
-
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            logits_processor (`LogitsProcessorList`, *optional*):
@@ -1789,7 +1788,6 @@ class GenerationMixin:
        can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.

        Parameters:
-
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            logits_processor (`LogitsProcessorList`, *optional*):
@@ -2046,7 +2044,6 @@ class GenerationMixin:
        can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.

        Parameters:
-
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            beam_scorer (`BeamScorer`):
@@ -2355,7 +2352,6 @@ class GenerationMixin:
        sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.

        Parameters:
-
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            beam_scorer (`BeamScorer`):
@@ -2672,7 +2668,6 @@ class GenerationMixin:
        decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.

        Parameters:
-
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                The sequence used as a prompt for the generation.
            beam_scorer (`BeamScorer`):
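The PyTorch side mirrors the TF one: `generate()` routes to the specialized methods above based on `do_sample`, `num_beams`, and `num_beam_groups`. A sketch (checkpoint and flags are arbitrary examples):

from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

inputs = tokenizer("Hello, my dog is", return_tensors="pt")
# num_beams=4 with do_sample=True selects beam search with multinomial sampling
outputs = model.generate(inputs.input_ids, max_length=20, num_beams=4, do_sample=True)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))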
@@ -80,8 +80,6 @@ class ModelCard:
    Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993

    Note: A model card can be loaded and saved to disk.
-
-    Parameters:
    """

    def __init__(self, **kwargs):
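The "loaded and saved to disk" note above, as a round trip. This is a sketch, not the card schema: the attribute names are free-form since `__init__` accepts `**kwargs`, and the file-path form of loading is an assumption.

from transformers import ModelCard

card = ModelCard(model_details="A toy card", intended_use="demo only")
card.save_pretrained("modelcard.json")           # a directory path works too
reloaded = ModelCard.from_pretrained("modelcard.json")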
@@ -563,7 +563,6 @@ class _LazyAutoMapping(OrderedDict):
    " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.

    Args:
-
    - config_mapping: The map model type to config class
    - model_mapping: The map model type to model (or tokenizer) class
    """
@@ -130,7 +130,6 @@ class FlaubertTokenizer(XLMTokenizer):
        - Install with `pip install sacremoses`

        Args:
-
        - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
            (bool). If True, we only apply BPE.

@@ -354,7 +354,6 @@ class FSMTTokenizer(PreTrainedTokenizer):
        - Install with `pip install sacremoses`

        Args:
-
        - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
            languages. However, we don't enforce it.
        - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
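The `lang`/`bypass_tokenizer` arguments above belong to the tokenizer's internal moses-then-BPE step (`_tokenize` in this class, an assumption worth checking before relying on it); a sketch against a real FSMT checkpoint:

from transformers import FSMTTokenizer

tokenizer = FSMTTokenizer.from_pretrained("facebook/wmt19-en-de")
# default path: sacremoses preprocessing and tokenization, then BPE
tokens = tokenizer._tokenize("Machine learning is great, isn't it?", lang="en")
# externally pre-tokenized text: skip moses, apply BPE only
tokens = tokenizer._tokenize("machine learning is great", bypass_tokenizer=True)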
@@ -1960,7 +1960,6 @@ def build_position_encoding(
    Builds the position encoding.

    Args:
-
    - out_channels: refers to the number of channels of the position encodings.
    - project_pos_dim: if specified, will project the position encodings to this dimension.

@@ -1398,7 +1398,6 @@ class TapexTokenizer(PreTrainedTokenizer):
    ):
        """
        Args:
-
            table_content:
                {"header": xxx, "rows": xxx, "id" (Optionally): xxx}

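The `table_content` shape above, spelled out as a concrete dict; the header names and rows are invented placeholders:

table_content = {
    "header": ["year", "city"],
    "rows": [["2008", "beijing"], ["2012", "london"]],
    # "id" may be added optionally, per the docstring
}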
@@ -523,7 +523,6 @@ class TransfoXLPreTrainedModel(PreTrainedModel):
        weights embeddings afterwards if the model class has a *tie_weights()* method.

        Arguments:
-
            new_num_tokens: (*optional*) int:
                New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at
                the end. Reducing the size will remove vectors from the end. If not provided or None: does nothing and
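The documented resizing behavior in use; the checkpoint is a real Transfo-XL example, and the extra `layer` argument this model's override takes (it keeps several embedding layers, an assumption from the class, not this hunk) is left at its default.

from transformers import TransfoXLLMHeadModel

model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103")
old_size = model.config.vocab_size
model.resize_token_embeddings(old_size + 2)  # two newly initialized vectors at the end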
@@ -791,7 +791,6 @@ class XLMTokenizer(PreTrainedTokenizer):
        externally, and set `bypass_tokenizer=True` to bypass the tokenizer.

        Args:
-
        - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
            languages. However, we don't enforce it.
        - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
@@ -1286,7 +1286,6 @@ def pytest_terminal_summary_main(tr, id):
    there.

    Args:
-
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
        needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.
@@ -377,7 +377,6 @@ class DistributedTensorGatherer:
    For some reason, that's not going to roll their boat. This class is there to solve that problem.

    Args:
-
        world_size (`int`):
            The number of processes used in the distributed training.
        num_samples (`int`):
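A single-process sketch of the gathering loop the class is built for; with `world_size=1` the gather is a pass-through, which keeps the example runnable outside a real distributed job.

import numpy as np
from transformers.trainer_pt_utils import DistributedTensorGatherer

gatherer = DistributedTensorGatherer(world_size=1, num_samples=10)
for batch in np.array_split(np.arange(10), 3):  # uneven batches of 4, 3, 3
    gatherer.add_arrays(batch)
predictions = gatherer.finalize()  # the 10 samples back, in order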
@@ -337,7 +337,6 @@ def speed_metrics(split, start_time, num_samples=None, num_steps=None):
    should be run immediately after the operation to be measured has completed.

    Args:
-
    - split: name to prefix metric (like train, eval, test...)
    - start_time: operation start time
    - num_samples: number of samples processed
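Usage is direct; per the docstring it runs right after the measured operation. A small sketch (the sample and step counts are arbitrary):

import time
from transformers.trainer_utils import speed_metrics

start_time = time.time()
_ = sorted(range(10**6))  # stand-in for the operation being measured
metrics = speed_metrics("eval", start_time, num_samples=1000, num_steps=125)
# -> {"eval_runtime": ..., "eval_samples_per_second": ..., "eval_steps_per_second": ...}
print(metrics)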
@@ -120,7 +120,6 @@ class NotebookProgressBar:
        The main method to update the progress bar to `value`.

        Args:
-
            value (`int`):
                The value to use. Must be between 0 and `total`.
            force_update (`bool`, *optional*, defaults to `False`):
@@ -204,7 +203,6 @@ class NotebookTrainingTracker(NotebookProgressBar):
    An object tracking the updates of an ongoing training with progress bars and a nice table reporting metrics.

    Args:
-
        num_steps (`int`): The number of steps during training. column_names (`List[str]`, *optional*):
            The list of column names for the metrics table (will be inferred from the first call to
            [`~utils.notebook.NotebookTrainingTracker.write_line`] if not set).
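A sketch of the `update(value)` contract documented two hunks up, meant to run in a Jupyter cell where the bar renders as HTML; the positional `total` argument mirrors the `total` the docstring refers to.

import time
from transformers.utils.notebook import NotebookProgressBar

pbar = NotebookProgressBar(100)  # `total` = 100
for value in range(100):
    pbar.update(value + 1)       # must stay between 0 and `total`
    time.sleep(0.01)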