diff --git a/app.py b/app.py
index 7e7a02c..546a18d 100644
--- a/app.py
+++ b/app.py
@@ -79,8 +79,7 @@ def model_inference(input_dict, history):
     ]
     prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
 
-    # Added truncation=True to explicitly activate truncation.
-    inputs = processor(text=prompt, images=[images], return_tensors="pt", truncation=True).to('cuda')
+    inputs = processor(text=prompt, images=[images], return_tensors="pt").to('cuda')
 
     generation_args = {
         "input_ids": inputs.input_ids,
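
For context, a minimal, self-contained sketch of the call pattern this hunk modifies, assuming a Hugging Face multimodal processor. The checkpoint name, image file, and message contents below are illustrative assumptions, not taken from this PR:

```python
from PIL import Image
from transformers import AutoProcessor

# Hypothetical checkpoint and image path, for illustration only;
# the PR does not name the model being served.
processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b")
image = Image.open("example.jpg")

messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Describe this image."},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

# Passing truncation=True without an explicit max_length can clip the prompt
# at the tokenizer's default limit, potentially cutting the image placeholder
# tokens so they no longer match the supplied pixel values. Omitting it, as
# this PR does, sidesteps that failure mode.
inputs = processor(text=prompt, images=[image], return_tensors="pt").to("cuda")
```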