Skip to content

Commit 4d958e3

Browse files
mengker33 and huijuanzh
authored and committed
Fix for GenerationConfig issue for pipeline init changes by transformers update (#2266)
Co-authored-by: Zhou, Huijuan <huijuan.zhou@intel.com>
1 parent 18aee59 commit 4d958e3

File tree

3 files changed

+14
-0
lines changed

3 files changed

+14
-0
lines changed

optimum/habana/transformers/modeling_utils.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -309,6 +309,7 @@
309309
gaudi_XLMRoberta_Sdpa_SelfAttention_forward,
310310
)
311311
from .models.deepseek_v2.modeling_deepseek_v2 import DeepseekV2ForCausalLM as GaudiDeepseekV2ForCausalLM
312+
from .pipelines import GaudiImageToTextPipeline
312313

313314

314315
def adapt_transformers_to_gaudi():
@@ -388,6 +389,9 @@ def adapt_transformers_to_gaudi():
388389
transformers.generation.MaxTimeCriteria.__call__ = gaudi_MaxTimeCriteria_call
389390
transformers.generation.EosTokenCriteria.__call__ = gaudi_EosTokenCriteria_call
390391
transformers.generation.StoppingCriteriaList.__call__ = gaudi_StoppingCriteriaList_call
392+
transformers.pipelines.image_to_text.ImageToTextPipeline._default_generation_config = (
393+
GaudiImageToTextPipeline._default_generation_config
394+
)
391395

392396
# Optimization for BLOOM generation on Gaudi
393397
transformers.models.bloom.modeling_bloom.BloomAttention.forward = gaudi_bloom_attention_forward
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
from .image_to_text import GaudiImageToTextPipeline
Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
1+
from transformers.pipelines.image_to_text import ImageToTextPipeline
2+
3+
from ..generation import GaudiGenerationConfig
4+
5+
class GaudiImageToTextPipeline(ImageToTextPipeline):
    """Image-to-text pipeline with a Gaudi-specific default generation config.

    Subclasses transformers' ``ImageToTextPipeline`` solely to override the
    ``_default_generation_config`` class attribute, so that pipelines built
    after ``adapt_transformers_to_gaudi()`` patches
    ``transformers.pipelines.image_to_text.ImageToTextPipeline`` pick up a
    ``GaudiGenerationConfig`` instead of the stock ``GenerationConfig``.
    """

    # NOTE(review): 256 presumably mirrors the upstream pipeline's default
    # max_new_tokens — confirm against the transformers version this repo pins.
    _default_generation_config = GaudiGenerationConfig(
        max_new_tokens=256,
    )

0 commit comments

Comments (0)