From fb6290b5a5d7ffd4e3563c2c0a5f860fc97aecb4 Mon Sep 17 00:00:00 2001 From: Maurizio Dipierro Date: Wed, 19 Mar 2025 17:08:31 +0100 Subject: [PATCH] Disable explicit truncation in processor call --- app.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/app.py b/app.py index 7e7a02c..546a18d 100644 --- a/app.py +++ b/app.py @@ -79,8 +79,7 @@ def model_inference(input_dict, history): ] prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True) - # Added truncation=True to explicitly activate truncation. - inputs = processor(text=prompt, images=[images], return_tensors="pt", truncation=True).to('cuda') + inputs = processor(text=prompt, images=[images], return_tensors="pt").to('cuda') generation_args = { "input_ids": inputs.input_ids,