# jodi_fix_code/test_real.py
import argparse
import json
import os
import re
import sys
from pathlib import Path
from typing import Any

import torch
import torchvision.transforms as T
from PIL import Image
from transformers import AutoProcessor, Qwen3VLForConditionalGeneration

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
os.environ["GRADIO_TEMP_DIR"] = "./tmp"

from jodi_pipeline import JodiPipeline
from model.postprocess import (
    ImagePostProcessor, LineartPostProcessor, EdgePostProcessor, DepthPostProcessor,
    NormalPostProcessor, AlbedoPostProcessor, SegADE20KPostProcessor, OpenposePostProcessor,
)
def clean_eval_question(q: str) -> str:
"""
Clean VQA-style question text for evaluation.
- If lettered options (A–Z) exist, keep text up to the last option.
- Otherwise, keep text up to the first '?' (inclusive).
"""
if not isinstance(q, str):
q = str(q)
    # Remove <image> placeholders
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    # Match lettered options (A-Z) in common styles: A. / A) / (A) / A: / A - ...
    option_pattern = r"(?:\(?[A-Z]\)?[\.\:\-\)]\s)"
    matches = list(re.finditer(option_pattern, q, flags=re.IGNORECASE))
    if matches:
        # Keep text up to the end of the last option marker ...
        last_match = matches[-1]
        # ... plus that option's content up to the end of the segment
        tail = q[last_match.end():]
        # Truncate any trailing instructions ("Please answer ...", etc.)
        tail_cut = re.split(r"(please\s+answer|choose\s+the|select\s+the|answer\s+directly)", tail, flags=re.IGNORECASE)[0]
        q = q[:last_match.end()] + tail_cut
    else:
        # No options: keep only the question (up to and including the first '?')
        match_qmark = re.search(r"\?", q)
        if match_qmark:
            q = q[:match_qmark.end()]
        else:
            q = q.split("\n")[0]  # fallback
    # Collapse extra newlines and whitespace
    q = re.sub(r"\n+", " ", q)
    q = re.sub(r"\s+", " ", q).strip()
return q
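# Usage sketch (hypothetical question string, not taken from the dataset):
#   clean_eval_question("<image 1> How many cars are visible? A. one B. two Please answer directly.")
#   -> "How many cars are visible? A. one B. two"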
def clean_prompt_question(q: str) -> str:
"""Clean VQA-style question text, keeping only the question stem before '?'. """
if not isinstance(q, str):
q = str(q)
    # Remove <image> placeholders
    q = re.sub(r"<\s*image\s*\d+\s*>", "", q, flags=re.IGNORECASE)
    # Keep everything up to and including the first question mark
    match = re.search(r"^(.*?\?)", q)
    if match:
        q = match.group(1)
    else:
        # No question mark: keep the first line
        q = q.split("\n")[0]
    # Collapse extra whitespace and newlines
    q = re.sub(r"\s+", " ", q).strip()
return q
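# Usage sketch (hypothetical question string, not taken from the dataset):
#   clean_prompt_question("<image 1> How many cars are visible? A. one B. two")
#   -> "How many cars are visible?"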
def dump_image(image, save_root):
os.makedirs(save_root, exist_ok=True)
save_path = os.path.join(save_root, "input.jpg")
image.convert("RGB").save(save_path, format="JPEG", quality=95)
return save_path
def concatenate_images(image_paths, save_path, images_per_row=None, image_format="png"):
    """Concatenate multiple images into one grid image and save it.

    Args:
        image_paths: list of image file paths.
        save_path: output path (including the file name).
        images_per_row: number of images per row (defaults to all in one row).
        image_format: output format.
    """
    # Load images
    images = [Image.open(p).convert("RGB") for p in image_paths]
    if images_per_row is None:
        images_per_row = len(images)
    # Resize all images to a common square size
    target_size = min(1024, images[0].size[0])
    images = [img.resize((target_size, target_size)) for img in images]
    # Compute grid dimensions
    widths, heights = zip(*(img.size for img in images))
    max_width = max(widths)
    rows = (len(images) + images_per_row - 1) // images_per_row
    total_height = heights[0] * rows  # all images share the same height after resizing
    new_im = Image.new("RGB", (max_width * images_per_row, total_height))
    y_offset = 0
    for i in range(0, len(images), images_per_row):
        row_imgs = images[i:i + images_per_row]
        x_offset = 0
        for img in row_imgs:
            new_im.paste(img, (x_offset, y_offset))
            x_offset += max_width
        y_offset += heights[0]
    os.makedirs(os.path.dirname(save_path), exist_ok=True)
    new_im.save(save_path, format=image_format.upper())
print(f"🧩 Saved merged image → {save_path}")
return save_path
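# Usage sketch (hypothetical paths): merge three modality maps into a single row.
#   concatenate_images(
#       ["out/image.png", "out/annotation_depth.png", "out/annotation_edge.png"],
#       "out/merged.png",
#   )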
def build_vqa_message(root, prompt, question):
"""
Build Qwen3-VL message for multimodal or single-image VQA.
Now explicitly tags each modality image before feeding into Qwen3-VL,
so that the model can distinguish RGB, edge, depth, normal, etc.
"""
root_path = Path(root)
    # ---------- Single-image case ----------
if root_path.is_file() and root_path.suffix.lower() in [".jpg", ".jpeg", ".png", ".webp"]:
image_path = str(root)
messages = [
{
"role": "user",
"content": [
{"type": "image", "image": image_path},
{"type": "text", "text": f"Answer the follow question:{question} based on the <image>."},
],
}
]
return messages
    # ---------- Multi-modality folder case ----------
modality_names = [
"image",
"annotation_lineart",
"annotation_edge",
"annotation_depth",
"annotation_normal",
"annotation_albedo",
"annotation_seg_12colors",
# "annotation_openpose",
]
    # Collect the modality files that exist
available = []
for name in modality_names:
for ext in [".png", ".jpg", ".jpeg"]:
path = Path(root) / f"{name}{ext}"
if path.exists():
available.append((name, str(path)))
break
    # Human-readable modality names
readable_map = {
"image": "RGB image",
"annotation_lineart": "line drawing",
"annotation_edge": "edge map",
"annotation_depth": "depth map",
"annotation_normal": "normal map",
"annotation_albedo": "albedo map",
"annotation_seg_12colors": "segmentation map",
# "annotation_openpose": "human pose map",
}
present_modalities = [readable_map[n] for n, _ in available]
text_prompt = (
f"Answer the following question based on multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
f"The following caption describes the image in detail: '{prompt}'. "
f"Question:{question}"
)
    # ---------- Build the content sequence (modality-anchored) ----------
    content = []
    print(f'available:{available}')
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        # Explicitly tag the modality type before each image
        content.append({"type": "text", "text": f"This is the {readable}."})
        content.append({"type": "image", "image": path})
    # Append the main instruction last
    content.append({"type": "text", "text": text_prompt})
messages = [{"role": "user", "content": content}]
return messages
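# The multi-modality branch returns a single-turn Qwen chat message whose content
# interleaves modality tags and images, e.g. (abridged):
#   [{"role": "user", "content": [
#       {"type": "text", "text": "This is the RGB image."},
#       {"type": "image", "image": ".../image.png"},
#       ...,
#       {"type": "text", "text": "Answer the following question based on multiple visual modalities ..."}]}]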
def build_multimodal_message(root, question, coarse_caption="a generic scene", feedback=""):
"""
Build Qwen3-VL message for multi-modal caption refinement.
Explicitly binds each image to its modality name (RGB, edge, depth, etc.)
so Qwen3-VL can reason over them correctly and refine the caption faithfully.
"""
modality_names = [
"image",
"annotation_lineart",
"annotation_edge",
"annotation_depth",
"annotation_normal",
"annotation_albedo",
"annotation_seg_12colors",
# "annotation_openpose",
]
    # --- Collect the modality files that exist ---
available = []
for name in modality_names:
for ext in [".png", ".jpg", ".jpeg"]:
path = Path(root) / f"{name}{ext}"
if path.exists():
available.append((name, str(path)))
break
    # --- Human-readable modality names ---
readable_map = {
"image": "RGB image",
"annotation_lineart": "line drawing",
"annotation_edge": "edge map",
"annotation_depth": "depth map",
"annotation_normal": "normal map",
"annotation_albedo": "albedo map",
"annotation_seg_12colors": "segmentation map",
# "annotation_openpose": "human pose map",
}
present_modalities = [readable_map[n] for n, _ in available]
    # --- Build the text instruction ---
text_prompt = (
f"You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}. "
f"Use all available modalities jointly to reason about the same scene rather than describing them separately. "
f"Generate an enhanced visual description that focuses on the aspects most relevant to answering the following question: '{question}'. "
f"Your task is to refine the description of the scene based on all visual modalities so that it highlights visual cues "
f"that are crucial for accurately addressing the question, such as object appearance, count, position, or relation, "
f"while maintaining faithfulness to the original visual content. "
f"Do not include any additional commentary or evaluations. "
f"Do NOT introduce any new objects, background environments, emotional tones, or storytelling context. "
f"Focus on describing the visual properties, including: "
f"(1) object category and identity, (2) object attributes such as color, shape, size, and texture, "
f"(3) spatial or relational positioning between objects if present, (4) object part–whole structure or state, and (5) object count or quantity. "
f"Exclude any stylistic, environmental, emotional, or narrative information. "
f"Consider the following feedback when refining your description: '{feedback}'. "
f"Describe the scene in an objective and concise tone, emphasizing the details that help answer the question: '{question}'. "
f"Coarse caption: '{coarse_caption}' "
)
# text_prompt0 = (
# f"You are given multiple visual modalities of the same scene, including: {', '.join(present_modalities)}. "
# f"The **RGB image** provides the most accurate and realistic appearance of the scene, "
# f"while other modalities (e.g., depth, normal, edge, segmentation) offer complementary structural and semantic details.\n\n"
# f"### Your Task:\n"
# f"Generate a refined, detailed, and visually grounded description of the scene shown in the images. "
# f"Use the RGB image as the main reference, and consult other modalities to verify geometry, boundaries, and spatial relations.\n\n"
# f"### Guidelines:\n"
# f"1. Describe what is *visibly present* — objects, materials, lighting, spatial layout, and relationships.\n"
# f"2. Integrate helpful information from auxiliary modalities (e.g., depth for distance, edges for structure).\n"
# f"3. Do NOT invent or assume anything not visually supported.\n"
# f"4. Avoid including any additional commentary or evaluations.\n"
# f"5. You may rephrase and expand upon the coarse caption for clarity and accuracy.\n\n"
# f"### Coarse Caption:\n'{coarse_caption}'\n\n"
# f"### Feedback to Incorporate:\n'{feedback}'\n\n"
# f"Now produce the final refined caption describing the scene based on the multimodal evidence below."
# )
    # --- Build the message content: tag each image with its modality ---
    content = []
    for name, path in available:
        readable = readable_map.get(name, "visual input")
        content.append({
            "type": "text",
            "text": f"This is the {readable}, which provides {get_modality_description(name)}."
        })
        content.append({"type": "image", "image": path})
    # Append the overall task instruction last
    content.append({"type": "text", "text": text_prompt})
messages = [{"role": "user", "content": content}]
return messages
def get_modality_description(name: str) -> str:
"""为每个模态生成一句说明,用于提示模型理解模态功能"""
desc_map = {
"image": "the main visual appearance of the scene, including color, texture, and lighting",
"annotation_lineart": "structural outlines, object contours, and fine geometry",
"annotation_edge": "strong boundaries and contrast edges between objects",
"annotation_depth": "distance and perspective information for spatial understanding",
"annotation_normal": "surface orientation and geometric curvature cues",
"annotation_albedo": "pure surface color without lighting or shading effects",
"annotation_seg_12colors": "semantic regions and object categories",
"annotation_openpose": "human body keypoints, joints, and orientation",
}
return desc_map.get(name, "complementary visual evidence")
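# Example of the combined tag produced for the depth modality by build_multimodal_message:
#   "This is the depth map, which provides distance and perspective information for spatial understanding."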
# ------------------------------
# Argument Parser
# ------------------------------
def get_parser():
parser = argparse.ArgumentParser(description="Run JODI inference without Gradio UI.")
parser.add_argument("--text_model_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
help="Path to model checkpoint.")
parser.add_argument("--config", type=str, default="./configs/inference.yaml", help="Path to config file.")
parser.add_argument("--model_path", type=str, default='hf://VIPL-GENUN/Jodi/Jodi.pth',
help="Path to model checkpoint.")
parser.add_argument("--model_name_or_path", type=str, default='Qwen/Qwen3-VL-8B-Instruct',
help="Path to model checkpoint.")
parser.add_argument("--data_path", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/images",
help="Prompt text for generation.")
parser.add_argument("--json", type=str, default="/home/efs/mjw/mjw/dataset/dataset/realworldqa/annotations.json",
help="Optional negative prompt.")
parser.add_argument("--temp_dir", type=str, default="/home/efs/mjw/mjw/dataset/dataset/tmp",
help="Prompt text for generation.")
parser.add_argument("--negative_prompt", type=str, default="", help="Optional negative prompt.")
parser.add_argument("--question", type=str, default="how many cars in this image?",
help="Optional negative prompt.")
parser.add_argument("--steps", type=int, default=20, help="Number of inference steps.")
parser.add_argument("--iters", type=int, default=10, help="Number of inference steps.")
parser.add_argument("--guidance_scale", type=float, default=4.5)
parser.add_argument("--seed", type=int, default=42)
parser.add_argument("--output_dir", type=str, default="./vqa_realworld_outputs", help="Directory to save results.")
return parser
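# Example invocation (paths are placeholders; adjust to your setup):
#   python test_real.py --data_path /path/to/realworldqa/images \
#       --json /path/to/realworldqa/annotations.json --iters 5 --output_dir ./vqa_realworld_outputs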
# ------------------------------
# Inference Functions
# ------------------------------
@torch.inference_mode()
def vqa_i2t(model, processor, image_path, question, vqa_id, max_length=300):
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"image": image_path,
},
{"type": "text", "text": f"Answer the follow question:{question} based on the <image>."},
],
}
]
print(messages)
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / str(vqa_id)
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
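# Note: vqa_i2t answers from the RGB image alone; the main loop passes vqa_id=100,
# so this baseline answer is written to <output_dir>/100/caption.txt for every sample.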
@torch.inference_mode()
def init_i2t(model, processor, image_path, iter_num, vqa_id, max_length=300):
messages = [
{
"role": "user",
"content": [
{
"type": "image",
"image": image_path,
},
{"type": "text", "text": f"Describe this image."},
],
}
]
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True, return_dict=True, return_tensors="pt"
)
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
@torch.inference_mode()
def evaluate_consistency(image_path, model, processor, question, answer, max_length=256):
    # --- Build the Qwen evaluation prompt ---
question = clean_eval_question(question)
eval_prompt = f"""
You are a VQA answer evaluator.
Given an image, a question, and a proposed answer,
score how correct the answer is according to the image evidence.
Then provide one short feedback sentence suggesting what kind of visual information related to {question} or reasoning should be improved
to make the answer more accurate or grounded in the image.
Return JSON strictly:
{{"AnswerScore": <float 0-1>, "Feedback": "<short suggestion>"}}
Question: "{question}"
Answer: "{answer}"
<image>
"""
messages = [
{
"role": "user",
"content": [
{"type": "image", "image": image_path},
{"type": "text", "text": eval_prompt},
],
}
]
    # --- Inference ---
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
).to(model.device)
out_ids = model.generate(**inputs, max_new_tokens=max_length)
out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]
    # --- Parse the model output ---
try:
data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
score = float(data.get("AnswerScore", 0))
feedback = data.get("Feedback", "")
except Exception:
score, feedback = 0.0, text.strip()
print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
return score, feedback
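# The evaluator is expected to return JSON such as (illustrative values):
#   {"AnswerScore": 0.8, "Feedback": "Verify the number of cars in the foreground."}
# Anything unparsable falls back to a score of 0.0 with the raw text as feedback.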
@torch.inference_mode()
def evaluate_multimodal_consistency(root, model, processor, question, answer, max_length=256):
"""
Evaluate VQA answer correctness using all available modalities (not just RGB).
This reduces model bias and improves visual grounding reliability.
"""
    # Collect the modality files that exist
modality_names = [
"image", "annotation_lineart", "annotation_edge",
"annotation_depth", "annotation_normal", "annotation_albedo",
"annotation_seg_12colors", "annotation_openpose"
]
available = []
for name in modality_names:
for ext in [".png", ".jpg", ".jpeg"]:
path = Path(root) / f"{name}{ext}"
if path.exists():
available.append((name, str(path)))
break
    # Human-readable modality names
readable_map = {
"image": "RGB image",
"annotation_lineart": "line drawing",
"annotation_edge": "edge map",
"annotation_depth": "depth map",
"annotation_normal": "normal map",
"annotation_albedo": "albedo map",
"annotation_seg_12colors": "segmentation map",
"annotation_openpose": "human pose map",
}
present_modalities = [readable_map[n] for n, _ in available]
    # Build the evaluation prompt
eval_prompt = f"""
You are a multimodal visual reasoning evaluator.
You are given multiple complementary visual modalities of the same scene, including: {', '.join(present_modalities)}.
Your task is to judge **how correct and visually grounded** the given answer is for the question,
based purely on visual evidence from all modalities.
Follow this process:
1. Identify the key visual concepts mentioned in the question (e.g., objects, counts, relations, colors).
2. Check whether these visual concepts are **clearly supported** or **contradicted** by the modalities.
3. If the question is multiple-choice (options A, B, C...), identify which one best matches the evidence.
4. Otherwise, directly evaluate how accurate the free-form answer is.
5. Penalize any parts that contradict the image, or ignore modalities.
Return JSON strictly:
{{
"AnswerScore": <float between 0 and 1>,
"Feedback": "<short and specific suggestion mentioning what aspect (e.g., object count, relation, visibility) could be improved>"
}}
Question: "{question}"
Answer: "{answer}"
"""
    # Build the content sequence (modality tag + image)
content = []
for name, path in available:
readable = readable_map.get(name, "visual input")
content.append({"type": "text", "text": f"This is the {readable}."})
content.append({"type": "image", "image": path})
content.append({"type": "text", "text": eval_prompt})
messages = [{"role": "user", "content": content}]
    # --- Inference ---
inputs = processor.apply_chat_template(
messages, tokenize=True, add_generation_prompt=True,
return_dict=True, return_tensors="pt"
).to(model.device)
out_ids = model.generate(**inputs, max_new_tokens=max_length)
out_trim = [o[len(i):] for i, o in zip(inputs.input_ids, out_ids)]
text = processor.batch_decode(out_trim, skip_special_tokens=True)[0]
    # --- Parse the model output ---
try:
data = json.loads(re.search(r"\{.*\}", text, re.S).group(0))
score = float(data.get("AnswerScore", 0))
feedback = data.get("Feedback", "")
except Exception:
score, feedback = 0.0, text.strip()
print(f"🧮 [AnswerScore] {score:.3f} | Feedback: {feedback}")
return score, feedback
@torch.inference_mode()
def text_refine(root, model, processor, prompt, question, feedback, iter_num, vqa_id, max_length=300):
question = clean_prompt_question(question)
messages = build_multimodal_message(root, question, prompt, feedback)
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
inputs = inputs.to(model.device)
# Inference: Generation of the output
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / vqa_id / f"iteration_{iter_num}"
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
@torch.inference_mode()
def vqa(root, model, processor, prompt, question, vqa_id, step, max_length=300):
messages = build_vqa_message(root, prompt, question)
print(messages)
inputs = processor.apply_chat_template(
messages,
tokenize=True,
add_generation_prompt=True,
return_dict=True,
return_tensors="pt"
)
inputs = inputs.to(model.device)
generated_ids = model.generate(**inputs, max_new_tokens=max_length)
generated_ids_trimmed = [
out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)]
output_text = processor.batch_decode(
generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
)
print(output_text)
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / vqa_id / f'iteration_{step}' / 'vqa_answer'
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(output_text[0].strip())
return output_text[0]
@torch.inference_mode()
def image_refine(prompt, images, role, pipe, iter_num, modality_names, generator, height, width, image_id):
# print(f"🚀 Generating with prompt: {prompt}")
outputs = pipe(
images=images,
role=role,
prompt=prompt,
negative_prompt=args.negative_prompt,
height=height,
width=width,
num_inference_steps=args.steps,
guidance_scale=args.guidance_scale,
num_images_per_prompt=1,
generator=generator,
task='t2i'
)
# Apply post-processing for each modality
results = [post_processors[i](outputs[i]) for i in range(1 + pipe.num_conditions)]
results = torch.stack(results, dim=1).reshape(-1, 3, height, width)
results = [T.ToPILImage()(res).convert("RGB") for res in results.unbind(0)]
# --------------------------
# Save results
# --------------------------
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / image_id / f"iteration_{iter_num}"
save_dir.mkdir(parents=True, exist_ok=True)
for idx, img in enumerate(results):
name = modality_names[idx]
save_path = save_dir / f"{name}.png"
img.save(save_path)
print(f"💾 Saved {name}{save_path}")
merged_path = save_dir / f"merged_iteration_{iter_num}.png"
concatenate_images([save_dir / f"{name}.png" for name in modality_names], merged_path)
print(f"\n✅ All results saved in: {save_dir}\n")
return save_dir
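# ------------------------------
# Main Driver
# ------------------------------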
if __name__ == "__main__":
args = get_parser().parse_args()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(f"✅ Using device: {device}")
processor = AutoProcessor.from_pretrained(
args.model_name_or_path,
)
model = Qwen3VLForConditionalGeneration.from_pretrained(
args.text_model_path,
attn_implementation="flash_attention_2",
        dtype=torch.bfloat16,
).to(device)
pipe = JodiPipeline(args.config)
pipe.from_pretrained(args.model_path)
modality_names = [
"image",
"annotation_lineart",
"annotation_edge",
"annotation_depth",
"annotation_normal",
"annotation_albedo",
"annotation_seg_12colors",
"annotation_openpose",
]
# Build post-processors
post_processors: list[Any] = [ImagePostProcessor()]
for condition in pipe.config.conditions: # type: ignore
if condition == "lineart":
post_processors.append(LineartPostProcessor())
elif condition == "edge":
post_processors.append(EdgePostProcessor())
elif condition == "depth":
post_processors.append(DepthPostProcessor())
elif condition == "normal":
post_processors.append(NormalPostProcessor())
elif condition == "albedo":
post_processors.append(AlbedoPostProcessor())
elif condition == "segmentation":
post_processors.append(SegADE20KPostProcessor(color_scheme="colors12", only_return_image=True))
elif condition == "openpose":
post_processors.append(OpenposePostProcessor())
else:
print(f"⚠️ Warning: Unknown condition: {condition}")
post_processors.append(ImagePostProcessor())
torch.manual_seed(args.seed)
generator = torch.Generator(device=device).manual_seed(args.seed)
with open(args.json, "r", encoding="utf-8") as f:
annotations = json.load(f)
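    # For each sample: caption the RGB image, answer the question, score the answer,
    # then iteratively regenerate the multi-modal views with Jodi, refine the caption,
    # re-answer, and keep the highest-scoring answer.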
for sample in annotations[15:306]:
image_path = os.path.join(args.data_path, sample["image"])
image_id = sample["image"].split('.')[0]
image = Image.open(image_path)
question = sample["question"]
control_images = [image.convert('RGB')] + [None] * pipe.num_conditions
role = [1] + [0] * pipe.num_conditions
print(role)
best_result, best_score = '', 0.0
max_length = 1024
# input_img = Image.open(image_path).convert("RGB")
width, height = image.size
print(f'ori width:{width}', f'ori height:{height}')
prompt = init_i2t(model, processor, image_path, 0, image_id, max_length)
result = vqa_i2t(model, processor, image_path, question, 100, max_length)
score, feedback = evaluate_consistency(image_path, model, processor, question, result)
if score >= best_score:
best_result, best_score = result, score
for step in range(1, args.iters):
save_dir = image_refine(prompt, control_images, role, pipe, step, modality_names, generator, height, width,
image_id)
max_length += 100
prompt = text_refine(save_dir, model, processor, prompt, question, feedback, step, image_id, max_length)
result = vqa(save_dir, model, processor, prompt, question, image_id, step, max_length)
score, feedback = evaluate_multimodal_consistency(save_dir, model, processor, question, result)
if score >= best_score:
best_result, best_score = result, score
os.makedirs(args.output_dir, exist_ok=True)
save_dir = Path(args.output_dir) / image_id / f'iteration_best' / 'vqa_answer'
save_dir.mkdir(parents=True, exist_ok=True)
caption_path = Path(save_dir) / f"caption.txt"
with open(caption_path, "w", encoding="utf-8") as f:
f.write(best_result)
print(best_result)