ilm-2b_watermark.py
#!/usr/bin/env python
# This uses InternLM to process 5 512x512 images/second on a 4090
# Pass in a list of images on stdin
# Prints out only those with watermarks
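#
# A minimal usage example (illustrative only; the directory and output file
# names below are placeholders, not part of this repository):
#   find images/ -name '*.jpg' | python ilm-2b_watermark.py > watermarked.txt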
import torch
from transformers import AutoModel, AutoTokenizer

torch.set_grad_enabled(False)

# Load the 1.8B InternLM-XComposer2-VL vision-language model and its tokenizer.
model = AutoModel.from_pretrained('internlm/internlm-xcomposer2-vl-1_8b',
                                  trust_remote_code=True).cuda().eval()
tokenizer = AutoTokenizer.from_pretrained('internlm/internlm-xcomposer2-vl-1_8b',
                                          trust_remote_code=True)

query = "<ImageHere> Does this image contain a watermark,copyright,or signature?"

while True:
    # Read one image path per line from stdin; stop on EOF or a blank line.
    try:
        image_path = input()
    except EOFError:
        exit()
    if image_path == '':
        exit()
    # Run inference in mixed precision with gradients disabled.
    with torch.no_grad():
        with torch.cuda.amp.autocast():
            response, _ = model.chat(tokenizer, query=query, image=image_path,
                                     history=[], do_sample=False)
    # Print only the paths for which the model answers "yes".
    if response.strip().lower().startswith("yes"):
        print(image_path)