# File size: 4,547 Bytes
# b089945
import argparse
import glob
import json
import os
import torch
from accelerate import PartialState
from PIL import Image
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer
class caption_processor:
    """Caption images with a chat-capable vision-language model.

    Loads the model and tokenizer once at construction, then `process`
    turns a PIL image into a dict with a caption and a short imagined
    video description (parsed from the model's JSON-ish reply).
    """

    def __init__(self, vlm_name, device):
        # trust_remote_code is required for custom VLM architectures that
        # ship their own modeling code (e.g. MiniCPM-style chat models).
        self.vlm = AutoModel.from_pretrained(
            vlm_name,
            trust_remote_code=True,
            attn_implementation="flash_attention_2",
            torch_dtype=torch.bfloat16,
        )
        self.vlm_tokenizer = AutoTokenizer.from_pretrained(
            vlm_name, trust_remote_code=True
        )
        self.vlm = self.vlm.eval().to(device)
        self.prompt = """
1. describe the image in brief, Avoid using phrases in [In the/The image/scene shows/contains/is a] in the captions, directly describe the contents.
2. Imagine this picture is the first frame of a 5-second video. Please describe the video and add dynamics, including the movement of objects and themes, as well as the overall camera movement.Avoid using phrases in [In the/The video/scene shows/contains/is a] in the descriptions, directly describe the contents.
3. Please output in JSON format.{"caption": "...","video_description": "..."}
"""  # noqa: E501

    def str_2_json(self, text):
        """Extract and parse the first {...} JSON object embedded in *text*.

        Returns the decoded dict, or None when no balanced-looking brace
        span exists or the span is not valid JSON.
        """
        # Take the widest span from the first '{' to the last '}' — model
        # replies often wrap the JSON in extra prose.
        start_idx = text.find("{")
        if start_idx == -1:
            return None
        end_idx = text.rfind("}")
        if end_idx == -1 or end_idx <= start_idx:
            return None
        json_str = text[start_idx : end_idx + 1]
        try:
            # json is imported at module level; no local re-import needed.
            return json.loads(json_str)
        except json.JSONDecodeError:
            return None

    def process(self, image):
        """Caption *image*; returns the parsed JSON dict, or the raw reply
        under the key "response" when the model output is not parseable."""
        msgs = [{"role": "user", "content": [image, self.prompt]}]
        answer = self.vlm.chat(
            msgs=msgs, tokenizer=self.vlm_tokenizer, enable_thinking=False, stream=False
        )
        dict_answer = self.str_2_json(answer)
        if dict_answer is None:
            # Keep the raw text so failures are still inspectable downstream.
            return {"response": answer}
        return dict_answer
def get_images_from_path(path):
    """Collect image paths from *path*.

    A directory yields all of its .jpg and .png files (non-recursive);
    a single .jpg/.png file yields itself; anything else yields [].
    """
    if os.path.isdir(path):
        jpgs = glob.glob(os.path.join(path, "*.jpg"))
        pngs = glob.glob(os.path.join(path, "*.png"))
        return jpgs + pngs
    if os.path.isfile(path) and path.endswith((".jpg", ".png")):
        return [path]
    return []
def parse_args():
    """Parse CLI arguments: the VLM model name, output directory, and one
    or more input image paths (files or directories)."""
    parser = argparse.ArgumentParser(description="Caption processor")
    parser.add_argument("--vlm_name", type=str, required=True)
    parser.add_argument("--output_dir", type=str, required=True)
    # nargs="+" requires at least one path and collects them into a list.
    parser.add_argument("--paths", type=str, required=True, nargs="+")
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    # One PartialState per process; accelerate assigns each its own device.
    distributed_state = PartialState()
    args = parse_args()
    output_dir = args.output_dir
    os.makedirs(output_dir, exist_ok=True)

    # Gather every candidate image across all supplied paths.
    all_paths = []
    for path in args.paths:
        all_paths.extend(get_images_from_path(path))
    print("found", len(all_paths), "images")

    processor = caption_processor(args.vlm_name, distributed_state.device)

    # Shard the image list across processes; no padding so each process
    # only sees real paths.
    with distributed_state.split_between_processes(
        all_paths, apply_padding=False
    ) as batched_paths:
        print("GPU", distributed_state.device, "found", len(batched_paths), "images")
        for path in tqdm(batched_paths, desc="Processing images"):
            try:
                json_path = os.path.join(output_dir, os.path.basename(path) + ".json")
                # Idempotent reruns: skip images already captioned.
                if os.path.exists(json_path):
                    print(f"File {json_path} already exists, skipping.")
                    continue
                # Use a context manager: Image.open is lazy and would
                # otherwise keep the file handle open (resource leak).
                with Image.open(path) as image:
                    output = None
                    for _ in range(3):
                        output = processor.process(image)
                        if output is not None:
                            break
                if output is None:
                    raise Exception("Failed to process image after 3 attempts")
                with open(json_path, "w", encoding="utf-8") as f:
                    json.dump(output, f, ensure_ascii=False, indent=2)
            except Exception as e:
                # Best-effort batch job: log the failure and keep going.
                print(f"Error processing {path}: {e}")