Fix llava next tie_word_embeddings config (#30640)

* fix llava next embedding

* add docstring

* Update src/transformers/models/llava_next/configuration_llava_next.py

Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>

---------

Co-authored-by: NielsRogge <48327001+NielsRogge@users.noreply.github.com>
Marc Sun 2024-05-06 14:01:26 +02:00 committed by GitHub
parent 9c772ac888
commit aa64f086a2
1 changed file with 4 additions and 1 deletion


@@ -55,6 +55,8 @@ class LlavaNextConfig(PretrainedConfig):
         image_grid_pinpoints (`List`, *optional*, defaults to `[[336, 672], [672, 336], [672, 672], [1008, 336], [336, 1008]]`):
             A list of possible resolutions to use for processing high resolution images. Each item in the list should be a tuple or list
             of the form `(height, width)`.
+        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+            Whether the model's input and output word embeddings should be tied.
 
     Example:
@@ -90,6 +92,7 @@ class LlavaNextConfig(PretrainedConfig):
         vision_feature_select_strategy="default",
         vision_feature_layer=-2,
         image_grid_pinpoints=None,
+        tie_word_embeddings=False,
         **kwargs,
     ):
         self.ignore_index = ignore_index
@@ -138,4 +141,4 @@ class LlavaNextConfig(PretrainedConfig):
         self.text_config = text_config
-        super().__init__(**kwargs)
+        super().__init__(tie_word_embeddings=tie_word_embeddings, **kwargs)
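For context, a minimal sketch of how the fixed argument behaves (hypothetical usage, assuming a `transformers` version that includes LLaVA-NeXT and this commit):

    from transformers import LlavaNextConfig

    # With the fix, tie_word_embeddings is forwarded to PretrainedConfig.__init__
    # instead of being silently dropped, so the flag is preserved on the config.
    config = LlavaNextConfig(tie_word_embeddings=True)
    assert config.tie_word_embeddings is True

    # The default stays False, matching the docstring added in this commit.
    assert LlavaNextConfig().tie_word_embeddings is False

Before this change, `tie_word_embeddings` was accepted via `**kwargs` but never reached the base class, so the model would fall back to the `PretrainedConfig` default rather than the value the user set.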