Commit fb6290b5a5 — "truncation off"
Parent: 06a5c59a47
Changed file: app.py (3 lines changed)
@@ -79,8 +79,7 @@ def model_inference(input_dict, history):
     ]
     prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)

-    # Added truncation=True to explicitly activate truncation.
-    inputs = processor(text=prompt, images=[images], return_tensors="pt", truncation=True).to('cuda')
+    inputs = processor(text=prompt, images=[images], return_tensors="pt").to('cuda')

     generation_args = {
         "input_ids": inputs.input_ids,
Loading…
Reference in New Issue