truncation true
parent 6023f099f8
commit 06a5c59a47

app.py: 13 lines changed
@@ -41,10 +41,11 @@ def normalize_values(text, target_max=500):
     return normalized_text
 
 processor = AutoProcessor.from_pretrained("ds4sd/SmolDocling-256M-preview")
-model = AutoModelForVision2Seq.from_pretrained("ds4sd/SmolDocling-256M-preview",
-                                               torch_dtype=torch.bfloat16,
-                                               # _attn_implementation="flash_attention_2"
-                                               ).to("cuda")
+model = AutoModelForVision2Seq.from_pretrained(
+    "ds4sd/SmolDocling-256M-preview",
+    torch_dtype=torch.bfloat16,
+    # _attn_implementation="flash_attention_2"
+).to("cuda")
 
 def model_inference(input_dict, history):
     text = input_dict["text"]
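Note on the first hunk: it only re-wraps the from_pretrained call onto multiple lines; behaviour is unchanged. For reference, a minimal, self-contained sketch of that setup (the imports are an assumption based on the class names used; the flash-attention line stays commented out, as in the app):

    import torch
    from transformers import AutoModelForVision2Seq, AutoProcessor

    processor = AutoProcessor.from_pretrained("ds4sd/SmolDocling-256M-preview")
    model = AutoModelForVision2Seq.from_pretrained(
        "ds4sd/SmolDocling-256M-preview",
        torch_dtype=torch.bfloat16,
        # _attn_implementation="flash_attention_2"  # optional; would require flash-attn to be installed
    ).to("cuda")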
@@ -77,7 +78,9 @@ def model_inference(input_dict, history):
         }
     ]
     prompt = processor.apply_chat_template(resulting_messages, add_generation_prompt=True)
-    inputs = processor(text=prompt, images=[images], return_tensors="pt").to('cuda')
+
+    # Added truncation=True to explicitly activate truncation.
+    inputs = processor(text=prompt, images=[images], return_tensors="pt", truncation=True).to('cuda')
 
     generation_args = {
         "input_ids": inputs.input_ids,
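Note on the second hunk: passing truncation=True is forwarded to the processor's tokenizer, so a prompt that exceeds the model's maximum sequence length is clipped instead of triggering a length warning or failing during generation. Below is a minimal sketch of how the new call fits into inference; the image path, prompt text, and generation settings are illustrative assumptions, not the app's actual values:

    import torch
    from PIL import Image
    from transformers import AutoModelForVision2Seq, AutoProcessor

    processor = AutoProcessor.from_pretrained("ds4sd/SmolDocling-256M-preview")
    model = AutoModelForVision2Seq.from_pretrained(
        "ds4sd/SmolDocling-256M-preview", torch_dtype=torch.bfloat16
    ).to("cuda")

    image = Image.open("page.png")  # illustrative input image
    messages = [
        {
            "role": "user",
            "content": [{"type": "image"}, {"type": "text", "text": "Convert this page to docling."}],
        }
    ]
    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

    # truncation=True lets the tokenizer clip an over-long text prompt
    # to the model's maximum length.
    inputs = processor(text=prompt, images=[image], return_tensors="pt", truncation=True).to("cuda")

    generated_ids = model.generate(**inputs, max_new_tokens=1024)
    print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])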