Upload 103 files
This view is limited to 50 files because it contains too many changes.
- test_scripts/AnimeGANv3_01/test_script.py +126 -0
- test_scripts/AnimeGANv3_02/test_script.py +125 -0
- test_scripts/AnimeGANv3_03/test_script.py +197 -0
- test_scripts/DeOldify_01/test_script.py +119 -0
- test_scripts/DeOldify_02/test_script.py +113 -0
- test_scripts/DeOldify_03/test_script.py +102 -0
- test_scripts/DeScratch_01/detection.py +178 -0
- test_scripts/DeScratch_01/test_script.py +133 -0
- test_scripts/DeScratch_02/test_script.py +121 -0
- test_scripts/DeScratch_03/test_script.py +97 -0
- test_scripts/Eparse_01/test_script.py +418 -0
- test_scripts/Eparse_02/test_script.py +360 -0
- test_scripts/Eparse_03/test_script.py +224 -0
- test_scripts/Faker_01/test_script.py +78 -0
- test_scripts/Faker_02/test_script.py +118 -0
- test_scripts/Faker_03/test_script.py +112 -0
- test_scripts/FunASR_01/test_script.py +122 -0
- test_scripts/FunASR_02/test_script.py +134 -0
- test_scripts/FunASR_03/test_script.py +135 -0
- test_scripts/InvisibleWatermark_01/imwatermark/__init__.py +1 -0
- test_scripts/InvisibleWatermark_01/imwatermark/dwtDctSvd.py +108 -0
- test_scripts/InvisibleWatermark_01/imwatermark/maxDct.py +134 -0
- test_scripts/InvisibleWatermark_01/imwatermark/rivaGan.py +69 -0
- test_scripts/InvisibleWatermark_01/imwatermark/rivagan_decoder.onnx +3 -0
- test_scripts/InvisibleWatermark_01/imwatermark/rivagan_encoder.onnx +3 -0
- test_scripts/InvisibleWatermark_01/imwatermark/watermark.py +171 -0
- test_scripts/InvisibleWatermark_01/test_script.py +100 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__init__.py +1 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/__init__.cpython-312.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/__init__.cpython-37.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/__init__.cpython-38.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/dwtDctSvd.cpython-312.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/maxDct.cpython-312.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/maxDct.cpython-37.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/maxDct.cpython-38.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/rivaGan.cpython-312.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/watermark.cpython-312.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/watermark.cpython-37.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/watermark.cpython-38.pyc +0 -0
- test_scripts/InvisibleWatermark_02/imwatermark/dwtDctSvd.py +108 -0
- test_scripts/InvisibleWatermark_02/imwatermark/maxDct.py +134 -0
- test_scripts/InvisibleWatermark_02/imwatermark/rivaGan.py +69 -0
- test_scripts/InvisibleWatermark_02/imwatermark/rivagan_decoder.onnx +3 -0
- test_scripts/InvisibleWatermark_02/imwatermark/rivagan_encoder.onnx +3 -0
- test_scripts/InvisibleWatermark_02/imwatermark/watermark.py +171 -0
- test_scripts/InvisibleWatermark_02/test_script.py +83 -0
- test_scripts/InvisibleWatermark_03/imwatermark/__init__.py +1 -0
- test_scripts/InvisibleWatermark_03/imwatermark/__pycache__/__init__.cpython-311.pyc +0 -0
- test_scripts/InvisibleWatermark_03/imwatermark/__pycache__/__init__.cpython-312.pyc +0 -0
- test_scripts/InvisibleWatermark_03/imwatermark/__pycache__/__init__.cpython-37.pyc +0 -0
test_scripts/AnimeGANv3_01/test_script.py
ADDED
@@ -0,0 +1,126 @@
#!/usr/bin/env python3
import os
import sys
import argparse
import json
import datetime
import cv2
import torch
import lpips
from torchvision import transforms
import torch.nn.functional as F
from PIL import Image, UnidentifiedImageError

def verify_image(path, exts=('.png', '.jpg', '.jpeg', '.webp')):
    """Check file existence, non-empty, valid extension, and PIL readability."""
    if not os.path.isfile(path):
        return False, f'File does not exist: {path}'
    if os.path.getsize(path) == 0:
        return False, f'File is empty: {path}'
    if not path.lower().endswith(exts):
        return False, f'Unsupported format: {path}'
    try:
        img = Image.open(path)
        img.verify()
    except (UnidentifiedImageError, Exception) as e:
        return False, f'Failed to read image: {path} ({e})'
    return True, ''

def load_tensor(path):
    """Load and normalize Tensor to [-1,1] as per original script"""
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    if img is None:
        raise RuntimeError(f'cv2 read failed: {path}')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    t = transforms.ToTensor()(img) * 2 - 1
    return t.unsqueeze(0)

def main():
    p = argparse.ArgumentParser(description='Automated anime effect evaluation script')
    p.add_argument('--groundtruth', required=True, help='Original image path')
    p.add_argument('--output', required=True, help='Anime-styled output image path')
    p.add_argument('--lpips-thresh', type=float, default=0.30,
                   help='LPIPS distance threshold (Pass if >= threshold)')
    p.add_argument('--clip-thresh', type=float, default=0.25,
                   help='CLIP Hayao style similarity threshold (Pass if > threshold)')
    p.add_argument('--result', required=True, help='Result JSONL file path (append mode)')
    args = p.parse_args()

    process = True
    comments = []

    # 1. Validate input/output files
    for tag, path in [('input', args.groundtruth), ('output', args.output)]:
        ok, msg = verify_image(path)
        if not ok:
            process = False
            comments.append(f'[{tag}] {msg}')

    # 2. Calculate LPIPS (only if process==True)
    lpips_val = None
    result_flag = False
    if process:
        try:
            img0 = load_tensor(args.groundtruth)
            img1 = load_tensor(args.output)
            # Align dimensions
            _, _, h0, w0 = img0.shape
            _, _, h1, w1 = img1.shape
            nh, nw = min(h0, h1), min(w0, w1)
            if (h0, w0) != (nh, nw):
                img0 = F.interpolate(img0, size=(nh, nw), mode='bilinear', align_corners=False)
            if (h1, w1) != (nh, nw):
                img1 = F.interpolate(img1, size=(nh, nw), mode='bilinear', align_corners=False)

            loss_fn = lpips.LPIPS(net='vgg').to(torch.device('cpu'))
            with torch.no_grad():
                lpips_val = float(loss_fn(img0, img1).item())

            passed = lpips_val >= args.lpips_thresh
            comments.append(f'LPIPS={lpips_val:.4f} (>= {args.lpips_thresh} → {"OK" if passed else "FAIL"})')
            result_flag = passed

        except Exception as e:
            process = False
            comments.append(f'Metric calculation error: {e}')

    # 3. CLIP Hayao style similarity check (only if process==True)
    if process:
        try:
            import clip
            import PIL.Image
            device = "cuda" if torch.cuda.is_available() else "cpu"
            clip_model, clip_preprocess = clip.load("ViT-B/32", device=device)

            image = clip_preprocess(PIL.Image.open(args.output)).unsqueeze(0).to(device)
            hayao_tokens = clip.tokenize(["a landscape in Hayao Miyazaki anime style"]).to(device)

            with torch.no_grad():
                image_features = clip_model.encode_image(image)
                text_features = clip_model.encode_text(hayao_tokens)

            image_features /= image_features.norm(dim=-1, keepdim=True)
            text_features /= text_features.norm(dim=-1, keepdim=True)

            hayao_score = (image_features @ text_features.T).item()

            passed = hayao_score > args.clip_thresh
            comments.append(f"CLIP Hayao style score = {hayao_score:.3f} (threshold = {args.clip_thresh} → {'OK' if passed else 'FAIL'})")
            result_flag = result_flag and passed

        except Exception as e:
            comments.append(f"CLIP style check failed: {e}")

    # 4. Write JSONL result
    entry = {
        "Process": process,
        "Result": result_flag,
        "TimePoint": datetime.datetime.now().isoformat(sep='T', timespec='seconds'),
        "comments": "; ".join(comments)
    }
    os.makedirs(os.path.dirname(args.result) or '.', exist_ok=True)
    with open(args.result, 'a', encoding='utf-8') as f:
        f.write(json.dumps(entry, ensure_ascii=False, default=str) + "\n")

if __name__ == "__main__":
    main()

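All of these test scripts append one JSON object per run to the `--result` file, with the same four keys (`Process`, `Result`, `TimePoint`, `comments`). A minimal sketch of aggregating that log follows; the `summarize_results` helper and the `results.jsonl` filename are illustrative, not part of this upload.

# Sketch only: tally the JSONL records that the test scripts above append.
import json

def summarize_results(jsonl_path):
    passed = failed = invalid = 0
    with open(jsonl_path, encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            entry = json.loads(line)
            if not entry.get("Process", False):
                invalid += 1   # inputs missing or unreadable; metrics never ran
            elif entry.get("Result", False):
                passed += 1    # all metric thresholds satisfied
            else:
                failed += 1    # metrics ran, but at least one threshold failed
    return {"passed": passed, "failed": failed, "invalid": invalid}

if __name__ == "__main__":
    print(summarize_results("results.jsonl"))  # hypothetical path passed via --result
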
test_scripts/AnimeGANv3_02/test_script.py
ADDED
@@ -0,0 +1,125 @@
#!/usr/bin/env python3
import os
import sys
import argparse
import json
import datetime
import cv2
import torch
import lpips
from torchvision import transforms
import torch.nn.functional as F
from PIL import Image, UnidentifiedImageError

def verify_image(path, exts=('.png', '.jpg', '.jpeg', '.webp')):
    if not os.path.isfile(path):
        return False, f'File does not exist: {path}'
    if os.path.getsize(path) == 0:
        return False, f'File is empty: {path}'
    if not path.lower().endswith(exts):
        return False, f'Unsupported format: {path}'
    try:
        img = Image.open(path)
        img.verify()
    except (UnidentifiedImageError, Exception) as e:
        return False, f'Failed to read image: {path} ({e})'
    return True, ''

def load_tensor(path):
    img = cv2.imread(path, cv2.IMREAD_COLOR)
    if img is None:
        raise RuntimeError(f'cv2 read failed: {path}')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    t = transforms.ToTensor()(img) * 2 - 1
    return t.unsqueeze(0)

def main():
    p = argparse.ArgumentParser(description='Automated anime effect evaluation script')
    p.add_argument('--groundtruth', required=True, help='Original image path')
    p.add_argument('--output', required=True, help='Anime-styled output image path')
    p.add_argument('--lpips-thresh', type=float, default=0.40,
                   help='LPIPS structural similarity max distance (Pass if <= threshold)')
    p.add_argument('--clip-thresh', type=float, default=0.25,
                   help='CLIP anime style similarity threshold (Pass if > threshold)')
    p.add_argument('--result', required=True, help='Result JSONL file path (append mode)')
    args = p.parse_args()

    process = True
    comments = []

    # 1. Validate input/output files
    for tag, path in [('input', args.groundtruth), ('output', args.output)]:
        ok, msg = verify_image(path)
        if not ok:
            process = False
            comments.append(f'[{tag}] {msg}')

    lpips_val = None
    lpips_pass = True
    clip_pass = False
    if process:
        try:
            # 2. LPIPS structure preservation check
            img0 = load_tensor(args.groundtruth)
            img1 = load_tensor(args.output)
            _, _, h0, w0 = img0.shape
            _, _, h1, w1 = img1.shape
            nh, nw = min(h0, h1), min(w0, w1)
            img0 = F.interpolate(img0, size=(nh, nw), mode='bilinear', align_corners=False)
            img1 = F.interpolate(img1, size=(nh, nw), mode='bilinear', align_corners=False)

            loss_fn = lpips.LPIPS(net='vgg').to(torch.device('cpu'))
            with torch.no_grad():
                lpips_val = float(loss_fn(img0, img1).item())
            lpips_pass = lpips_val <= args.lpips_thresh
            comments.append(f'LPIPS={lpips_val:.4f} (<= {args.lpips_thresh} → {"OK" if lpips_pass else "FAIL"})')
        except Exception as e:
            process = False
            comments.append(f'Metric calculation error: {e}')

    if process:
        try:
            import clip
            import PIL.Image
            device = "cuda" if torch.cuda.is_available() else "cpu"
            clip_model, clip_preprocess = clip.load("ViT-B/32", device=device)

            image = clip_preprocess(PIL.Image.open(args.output)).unsqueeze(0).to(device)
            prompt_list = [
                "anime-style photo",
                "cartoon photo",
                "anime drawing",
                "photo in manga style",
                "Hayao Miyazaki anime style"
            ]
            tokens = clip.tokenize(prompt_list).to(device)

            with torch.no_grad():
                image_features = clip_model.encode_image(image)
                text_features = clip_model.encode_text(tokens)
                image_features /= image_features.norm(dim=-1, keepdim=True)
                text_features /= text_features.norm(dim=-1, keepdim=True)
                scores = (image_features @ text_features.T).squeeze(0)
                best_score = scores.max().item()

            clip_pass = best_score > args.clip_thresh
            comments.append(f'CLIP best anime style score = {best_score:.3f} (>{args.clip_thresh} → {"OK" if clip_pass else "FAIL"})')

        except Exception as e:
            comments.append(f"CLIP style check failed: {e}")

    result_flag = process and lpips_pass and clip_pass

    # 4. Write JSONL result
    entry = {
        "Process": process,
        "Result": result_flag,
        "TimePoint": datetime.datetime.now().isoformat(sep='T', timespec='seconds'),
        "comments": "; ".join(comments)
    }
    os.makedirs(os.path.dirname(args.result) or '.', exist_ok=True)
    with open(args.result, 'a', encoding='utf-8') as f:
        f.write(json.dumps(entry, ensure_ascii=False, default=str) + "\n")

if __name__ == "__main__":
    main()

test_scripts/AnimeGANv3_03/test_script.py
ADDED
@@ -0,0 +1,197 @@
import argparse
import cv2
import numpy as np
from skimage.metrics import structural_similarity as ssim
import torch
import torchvision.models as models
import torchvision.transforms as transforms
from scipy.linalg import sqrtm
import os
import json
from datetime import datetime


def extract_frames(video_path, max_frames=100):
    """Extract frames from video, limiting maximum frames for efficiency"""
    cap = cv2.VideoCapture(video_path)
    frames = []
    count = 0
    while cap.isOpened() and count < max_frames:
        ret, frame = cap.read()
        if not ret:
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frames.append(frame)
        count += 1
    cap.release()
    return frames


def compute_ssim(input_frames, output_frames):
    """Calculate average SSIM between input and output frames"""
    ssim_scores = []
    for in_frame, out_frame in zip(input_frames, output_frames):
        # Resize frames if dimensions don't match
        min_shape = (min(in_frame.shape[0], out_frame.shape[0]),
                     min(in_frame.shape[1], out_frame.shape[1]))
        in_frame = cv2.resize(in_frame, min_shape[::-1])
        out_frame = cv2.resize(out_frame, min_shape[::-1])
        # Convert to grayscale for SSIM calculation
        in_gray = cv2.cvtColor(in_frame, cv2.COLOR_RGB2GRAY)
        out_gray = cv2.cvtColor(out_frame, cv2.COLOR_RGB2GRAY)
        score = ssim(in_gray, out_gray, data_range=255)
        ssim_scores.append(score)
    return np.mean(ssim_scores) if ssim_scores else 0.0


def get_inception_features(frames, model, transform, device):
    """Extract Inception V3 features for FID calculation"""
    features = []
    for frame in frames:
        img = cv2.resize(frame, (299, 299))
        img = transform(img).unsqueeze(0).to(device)
        with torch.no_grad():
            feat = model(img).squeeze().cpu().numpy()
        features.append(feat)
    return np.array(features)


def compute_fid(input_frames, output_frames):
    """Calculate FID between input and output frames using Inception V3"""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inception = models.inception_v3(pretrained=True, transform_input=False).eval().to(device)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])

    input_features = get_inception_features(input_frames, inception, transform, device)
    output_features = get_inception_features(output_frames, inception, transform, device)

    # Calculate mean and covariance
    mu1, sigma1 = np.mean(input_features, axis=0), np.cov(input_features, rowvar=False)
    mu2, sigma2 = np.mean(output_features, axis=0), np.cov(output_features, rowvar=False)

    # Calculate FID
    diff = mu1 - mu2
    covmean = sqrtm(sigma1.dot(sigma2))
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    fid = diff.dot(diff) + np.trace(sigma1 + sigma2 - 2 * covmean)
    return fid


def evaluate_animeganv3(input_video, output_video, ssim_threshold=0.7, fid_threshold=400):
    """Evaluate AnimeGANv3 stylization effect using SSIM and FID"""
    messages = []
    messages.append(f"Evaluating videos:\nInput video: {input_video}\nOutput video: {output_video}")

    # Extract frames
    input_frames = extract_frames(input_video)
    output_frames = extract_frames(output_video)

    if len(input_frames) == 0 or len(output_frames) == 0:
        messages.append("Error: Failed to extract frames from one or both videos.")
        return False, "\n".join(messages)

    if len(input_frames) != len(output_frames):
        messages.append("Warning: Input and output videos have different frame counts.")
        min_frames = min(len(input_frames), len(output_frames))
        input_frames = input_frames[:min_frames]
        output_frames = output_frames[:min_frames]

    # Calculate SSIM
    avg_ssim = compute_ssim(input_frames, output_frames)
    messages.append(f"Average SSIM: {avg_ssim:.4f}")

    # Calculate FID
    fid_score = compute_fid(input_frames, output_frames)
    messages.append(f"FID score: {fid_score:.2f}")

    # Compare with thresholds
    ssim_pass = avg_ssim >= ssim_threshold
    fid_pass = fid_score <= fid_threshold
    success = ssim_pass and fid_pass

    result_message = f"\nEvaluation results:\n"
    result_message += f"SSIM (≥ {ssim_threshold}): {'Pass' if ssim_pass else 'Fail'} ({avg_ssim:.4f})\n"
    result_message += f"FID (≤ {fid_threshold}): {'Pass' if fid_pass else 'Fail'} ({fid_score:.2f})\n"
    result_message += f"Overall success: {'Yes' if success else 'No'}"
    messages.append(result_message)

    return success, "\n".join(messages)


def is_valid_video_file(file_path):
    """Check if file exists, is non-empty and has valid video format (.mp4)"""
    if not os.path.exists(file_path):
        return False, f"File {file_path} does not exist."
    if os.path.getsize(file_path) == 0:
        return False, f"File {file_path} is empty."
    if not file_path.lower().endswith('.mp4'):
        return False, f"File {file_path} has incorrect format, only .mp4 supported."
    return True, ""


def main():
    parser = argparse.ArgumentParser(description="Evaluate AnimeGANv3 video stylization effect")
    parser.add_argument("-i", "--groundtruth", required=True, help="Input video file path")
    parser.add_argument("-o", "--output", required=True, help="Output video file path")
    parser.add_argument("--ssim_threshold", type=float, default=0.7, help="SSIM success threshold")
    parser.add_argument("--fid_threshold", type=float, default=400.0, help="FID success threshold")
    parser.add_argument("--result", help="JSONL file path to save results")

    args = parser.parse_args()

    # Collect all output messages
    messages = []
    success = False
    process_valid = True

    # Validate input files
    input_valid, input_error = is_valid_video_file(args.groundtruth)
    output_valid, output_error = is_valid_video_file(args.output)

    if not input_valid:
        messages.append(input_error)
        process_valid = False
    if not output_valid:
        messages.append(output_error)
        process_valid = False

    # If inputs are valid, run evaluation
    if process_valid:
        success, eval_message = evaluate_animeganv3(
            args.groundtruth,
            args.output,
            args.ssim_threshold,
            args.fid_threshold
        )
        messages.append(eval_message)
    else:
        messages.append("Evaluation not run due to input file validation failure.")

    # Print all messages
    print("\n".join(messages))

    # If --result is specified, save to JSONL
    if args.result:
        result_entry = {
            "Process": process_valid,
            "Result": success,
            "TimePoint": datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
            "comments": "\n".join(messages)
        }
        try:
            os.makedirs(os.path.dirname(args.result) or '.', exist_ok=True)
            with open(args.result, 'a', encoding='utf-8') as f:
                json_line = json.dumps(result_entry, ensure_ascii=False, default=str)
                f.write(json_line + '\n')  # Ensure newline append
        except Exception as e:
            print(f"Error: Failed to save results to {args.result}, reason: {str(e)}")


if __name__ == "__main__":
    main()

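The FID value computed in `compute_fid` above is the standard Fréchet distance between two Gaussians fitted to Inception features. The following is a small self-contained sketch of that same formula on synthetic feature matrices; the feature dimension, sample count, and random data are made up purely for illustration.

# Sketch only: the FID formula from compute_fid, applied to toy features.
import numpy as np
from scipy.linalg import sqrtm

rng = np.random.default_rng(0)
feats_a = rng.normal(size=(100, 64))            # pretend Inception features, video A
feats_b = rng.normal(loc=0.1, size=(100, 64))   # pretend Inception features, video B

mu1, sigma1 = feats_a.mean(axis=0), np.cov(feats_a, rowvar=False)
mu2, sigma2 = feats_b.mean(axis=0), np.cov(feats_b, rowvar=False)

diff = mu1 - mu2
covmean = sqrtm(sigma1 @ sigma2)
if np.iscomplexobj(covmean):
    covmean = covmean.real                      # drop numerical imaginary residue
fid = diff @ diff + np.trace(sigma1 + sigma2 - 2 * covmean)
print(f"toy FID: {fid:.2f}")
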
test_scripts/DeOldify_01/test_script.py
ADDED
@@ -0,0 +1,119 @@
import os
import sys
import argparse
import json
from datetime import datetime
import requests
from io import BytesIO

import numpy as np
from PIL import Image
from skimage.color import rgb2lab, deltaE_ciede2000
from basicsr.metrics.niqe import calculate_niqe

def download_image(path_or_url: str) -> Image.Image:
    """Download or read local image, return PIL.Image in RGB mode."""
    if path_or_url.startswith(('http://', 'https://')):
        resp = requests.get(path_or_url, timeout=10)
        if resp.status_code != 200:
            raise ValueError(f"Failed to download image: {path_or_url} (status code {resp.status_code})")
        data = BytesIO(resp.content)
    else:
        if not os.path.isfile(path_or_url):
            raise ValueError(f"File does not exist: {path_or_url}")
        data = path_or_url
    try:
        return Image.open(data).convert('RGB')
    except Exception as e:
        raise ValueError(f"Failed to open image: {e}")

def compute_ciede2000(ref_img: Image.Image, test_img: Image.Image) -> float:
    arr_ref = np.asarray(ref_img, dtype=np.float32) / 255.0
    arr_test = np.asarray(test_img, dtype=np.float32) / 255.0
    lab_ref = rgb2lab(arr_ref)
    lab_test = rgb2lab(arr_test)
    delta = deltaE_ciede2000(lab_ref, lab_test)
    return float(np.mean(delta))

def compute_niqe(img: Image.Image) -> float:
    arr = np.asarray(img).astype(np.float32)
    return float(calculate_niqe(arr, crop_border=0))

def write_result_jsonl(file_path: str, data: dict):
    """Append single result to file in JSONL format."""
    try:
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'a', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False, default=str) + '\n')
    except Exception as e:
        print(f"❌ Error writing JSONL file: {e}", file=sys.stderr)

def main():
    p = argparse.ArgumentParser(
        description="Evaluate colorization/enhancement effect using CIEDE2000 and NIQE metrics, with JSONL output option")
    p.add_argument("--groundtruth", type=str, required=True, help="Reference image URL or local path")
    p.add_argument("--output", type=str, required=True, help="Reconstructed image URL or local path")
    p.add_argument("--ciede-thresh", type=float, required=True,
                   help="CIEDE2000 minimum acceptance threshold (higher is better)")
    p.add_argument("--niqe-thresh", type=float, required=True,
                   help="NIQE maximum acceptance threshold (lower is better)")
    p.add_argument("--result", help="File path to store JSONL results")
    args = p.parse_args()

    process_ok = True
    comments = []

    # Timestamp
    time_point = datetime.now().isoformat()

    # Load images
    try:
        img_ref = download_image(args.groundtruth)
        img_recon = download_image(args.output)
    except ValueError as err:
        comments.append(str(err))
        process_ok = False

    # Calculate metrics only if Process OK
    if process_ok:
        try:
            score_ciede = compute_ciede2000(img_ref, img_recon)
            score_niqe = compute_niqe(img_recon)

            comments.append(f"CIEDE2000 average color difference: {score_ciede:.4f} (threshold {args.ciede_thresh})")
            comments.append(f"Reconstructed image NIQE score: {score_niqe:.4f} (threshold {args.niqe_thresh})")

            ok_ciede = score_ciede >= args.ciede_thresh
            ok_niqe = score_niqe <= args.niqe_thresh

            if ok_ciede and ok_niqe:
                comments.append("✅ Processing effect meets requirements: CIEDE2000↑ and NIQE↓ both satisfy thresholds")
                result_ok = True
            else:
                fail_reasons = []
                if not ok_ciede: fail_reasons.append("CIEDE2000 not met")
                if not ok_niqe: fail_reasons.append("NIQE not met")
                comments.append("❌ Processing effect does not meet requirements: " + " ".join(fail_reasons))
                result_ok = False
        except Exception as e:
            comments.append(f"Exception during metric calculation: {e}")
            result_ok = False
    else:
        result_ok = False

    # Write to JSONL if --result is specified
    if args.result:
        record = {
            "Process": process_ok,
            "Result": result_ok,
            "TimePoint": time_point,
            "comments": "\n".join(comments)
        }
        write_result_jsonl(args.result, record)

    # Print all comments
    for line in comments:
        print(line, file=(sys.stderr if not process_ok or not result_ok else sys.stdout))

if __name__ == "__main__":
    main()

test_scripts/DeOldify_02/test_script.py
ADDED
@@ -0,0 +1,113 @@
import os
import sys
import argparse
import json
from datetime import datetime
import requests
from io import BytesIO

import numpy as np
from PIL import Image
from skimage.color import rgb2lab, deltaE_ciede2000
from basicsr.metrics.niqe import calculate_niqe

def download_image(path_or_url: str) -> Image.Image:
    """Download or read local image, return PIL.Image in RGB mode."""
    if path_or_url.startswith(('http://', 'https://')):
        resp = requests.get(path_or_url, timeout=10)
        if resp.status_code != 200:
            raise ValueError(f"Failed to download image: {path_or_url} (status code {resp.status_code})")
        data = BytesIO(resp.content)
    else:
        if not os.path.isfile(path_or_url):
            raise ValueError(f"File does not exist: {path_or_url}")
        data = path_or_url
    try:
        return Image.open(data).convert('RGB')
    except Exception as e:
        raise ValueError(f"Failed to open image: {e}")

def compute_ciede2000(ref_img: Image.Image, test_img: Image.Image) -> float:
    arr_ref = np.asarray(ref_img, dtype=np.float32) / 255.0
    arr_test = np.asarray(test_img, dtype=np.float32) / 255.0
    lab_ref = rgb2lab(arr_ref)
    lab_test = rgb2lab(arr_test)
    delta = deltaE_ciede2000(lab_ref, lab_test)
    return float(np.mean(delta))

def compute_niqe(img: Image.Image) -> float:
    arr = np.asarray(img).astype(np.float32)
    return float(calculate_niqe(arr, crop_border=0))

def write_result_jsonl(file_path: str, data: dict):
    """Append single result to file in JSONL format."""
    try:
        os.makedirs(os.path.dirname(file_path), exist_ok=True)
        with open(file_path, 'a', encoding='utf-8') as f:
            f.write(json.dumps(data, ensure_ascii=False, default=str) + '\n')
    except Exception as e:
        print(f"❌ Error writing JSONL file: {e}", file=sys.stderr)

def main():
    p = argparse.ArgumentParser(
        description="Evaluate colorization/enhancement effect using CIEDE2000 and NIQE metrics, with JSONL output option")
    p.add_argument("--groundtruth", type=str, required=True, help="Reference image URL or local path")
    p.add_argument("--output", type=str, required=True, help="Reconstructed image URL or local path")
    p.add_argument("--ciede-thresh", type=float, required=True,
                   help="CIEDE2000 minimum acceptance threshold (higher is better)")
    p.add_argument("--niqe-thresh", type=float, required=True,
                   help="NIQE maximum acceptance threshold (lower is better)")
    p.add_argument("--result", help="File path to store JSONL results")
    args = p.parse_args()

    process_ok = True
    result_ok = False
    comments = []

    time_point = datetime.now().isoformat()

    try:
        img_ref = download_image(args.groundtruth)
        img_recon = download_image(args.output)
    except ValueError as err:
        comments.append(str(err))
        process_ok = False

    if process_ok:
        try:
            score_ciede = compute_ciede2000(img_ref, img_recon)
            score_niqe = compute_niqe(img_recon)

            comments.append(f"CIEDE2000 average color difference: {score_ciede:.4f} (threshold {args.ciede_thresh})")
            comments.append(f"Reconstructed image NIQE score: {score_niqe:.4f} (threshold {args.niqe_thresh})")

            ok_ciede = score_ciede >= args.ciede_thresh
            ok_niqe = score_niqe <= args.niqe_thresh

            if ok_ciede and ok_niqe:
                comments.append("✅ Processing effect meets requirements: CIEDE2000↑ and NIQE↓ both satisfy thresholds")
                result_ok = True
            else:
                fail_reasons = []
                if not ok_ciede:
                    fail_reasons.append("CIEDE2000 not met")
                if not ok_niqe:
                    fail_reasons.append("NIQE not met")
                comments.append("❌ Processing effect does not meet requirements: " + " ".join(fail_reasons))
        except Exception as e:
            comments.append(f"Exception during metric calculation: {e}")

    if args.result:
        record = {
            "Process": process_ok,
            "Result": result_ok,
            "TimePoint": time_point,
            "comments": "\n".join(comments)
        }
        write_result_jsonl(args.result, record)

    for line in comments:
        print(line, file=(sys.stderr if not process_ok or not result_ok else sys.stdout))

if __name__ == "__main__":
    main()

test_scripts/DeOldify_03/test_script.py
ADDED
@@ -0,0 +1,102 @@
#!/usr/bin/env python3
import argparse
import os
import sys
import cv2
import numpy as np
import json
from datetime import datetime

def compute_colorfulness(frame):
    # Convert to float for computation
    b, g, r = cv2.split(frame.astype('float'))
    # Compute RG and YB components
    rg = r - g
    yb = 0.5 * (r + g) - b
    # Compute statistics
    std_rg, std_yb = np.std(rg), np.std(yb)
    mean_rg, mean_yb = np.mean(rg), np.mean(yb)
    # Colorfulness metric
    return np.sqrt(std_rg**2 + std_yb**2) + 0.3 * np.sqrt(mean_rg**2 + mean_yb**2)

def sample_frames(video_path, max_frames=30):
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return []
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    step = max(1, total_frames // max_frames)
    frames = []
    for idx in range(0, total_frames, step):
        cap.set(cv2.CAP_PROP_POS_FRAMES, idx)
        ret, frame = cap.read()
        if not ret:
            break
        # Resize frame to width=256, keep aspect ratio
        h, w = frame.shape[:2]
        new_w = 256
        new_h = int(h * (256 / w))
        frame = cv2.resize(frame, (new_w, new_h))
        frames.append(frame)
    cap.release()
    return frames

def main():
    parser = argparse.ArgumentParser(description="Evaluate video colorfulness (detect non-BW video).")
    parser.add_argument("--output", type=str, required=True,
                        help="Path to the input video file.")
    parser.add_argument("--threshold", type=float, default=10.0,
                        help="Colorfulness threshold for pass/fail (default: 10.0).")
    parser.add_argument("--result", help="Path to append the jsonl result.")
    args = parser.parse_args()

    input_path = args.output
    process_ok = False
    results_ok = False
    comments = []

    # Check file existence and basic validity
    if not os.path.exists(input_path):
        comments.append(f"Input file not found: {input_path}")
    elif os.path.getsize(input_path) == 0:
        comments.append(f"Input file is empty: {input_path}")
    else:
        ext = os.path.splitext(input_path)[1].lower()
        if ext not in ['.mp4', '.avi', '.mov', '.mkv']:
            comments.append(f"Unsupported file format: {ext}")
        else:
            process_ok = True

    if process_ok:
        frames = sample_frames(input_path)
        if not frames:
            comments.append("Failed to read any frames from video.")
        else:
            scores = [compute_colorfulness(f) for f in frames]
            avg_score = float(np.mean(scores))
            comments.append(f"Average colorfulness: {avg_score:.2f}")
            results_ok = avg_score > args.threshold
            comments.append("Pass" if results_ok else "Fail")

    # Print result
    print("=== Evaluation ===")
    print("Process OK: ", process_ok)
    if process_ok:
        print("Result OK: ", results_ok)
    for c in comments:
        print(c)

    # Append to jsonl
    if args.result:
        record = {
            "Process": process_ok,
            "Result": results_ok,
            "TimePoint": datetime.now().isoformat(),
            "comments": "; ".join(comments)
        }
        os.makedirs(os.path.dirname(args.result), exist_ok=True)
        with open(args.result, 'a', encoding='utf-8') as f:
            json_line = json.dumps(record, default=str, ensure_ascii=False)
            f.write(json_line + "\n")

if __name__ == "__main__":
    main()

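The colorfulness score in `compute_colorfulness` is zero for a pure grayscale frame and grows with chroma spread, which is what the default threshold of 10 is meant to separate. A quick sanity check on synthetic frames follows; the frame sizes, colors, and the standalone `colorfulness` helper are illustrative only and simply restate the formula above without the cv2 dependency.

# Sketch only: the same colorfulness formula on two synthetic BGR frames.
import numpy as np

def colorfulness(frame):
    b = frame[..., 0].astype(float)
    g = frame[..., 1].astype(float)
    r = frame[..., 2].astype(float)
    rg = r - g
    yb = 0.5 * (r + g) - b
    return np.sqrt(np.std(rg) ** 2 + np.std(yb) ** 2) + 0.3 * np.sqrt(np.mean(rg) ** 2 + np.mean(yb) ** 2)

gray = np.full((64, 64, 3), 128, dtype=np.uint8)   # equal channels -> no color
half_red = gray.copy()
half_red[:, 32:] = (0, 0, 255)                     # saturate half the frame in BGR red

print(colorfulness(gray))      # 0.0: grayscale footage fails the > 10 check
print(colorfulness(half_red))  # well above 10: colorized footage passes
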
test_scripts/DeScratch_01/detection.py
ADDED
@@ -0,0 +1,178 @@
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

import argparse
import gc
import json
import os
import time
import warnings

import numpy as np
import torch
import torch.nn.functional as F
import torchvision as tv
from PIL import Image, ImageFile

from detection_models import networks
from detection_util.util import *

warnings.filterwarnings("ignore", category=UserWarning)

ImageFile.LOAD_TRUNCATED_IMAGES = True


def data_transforms(img, full_size, method=Image.BICUBIC):
    if full_size == "full_size":
        ow, oh = img.size
        h = int(round(oh / 16) * 16)
        w = int(round(ow / 16) * 16)
        if (h == oh) and (w == ow):
            return img
        return img.resize((w, h), method)

    elif full_size == "scale_256":
        ow, oh = img.size
        pw, ph = ow, oh
        if ow < oh:
            ow = 256
            oh = ph / pw * 256
        else:
            oh = 256
            ow = pw / ph * 256

        h = int(round(oh / 16) * 16)
        w = int(round(ow / 16) * 16)
        if (h == ph) and (w == pw):
            return img
        return img.resize((w, h), method)


def scale_tensor(img_tensor, default_scale=256):
    _, _, w, h = img_tensor.shape
    if w < h:
        ow = default_scale
        oh = h / w * default_scale
    else:
        oh = default_scale
        ow = w / h * default_scale

    oh = int(round(oh / 16) * 16)
    ow = int(round(ow / 16) * 16)

    return F.interpolate(img_tensor, [ow, oh], mode="bilinear")


def blend_mask(img, mask):

    np_img = np.array(img).astype("float")

    return Image.fromarray((np_img * (1 - mask) + mask * 255.0).astype("uint8")).convert("RGB")


def main(config):
    print("initializing the dataloader")

    model = networks.UNet(
        in_channels=1,
        out_channels=1,
        depth=4,
        conv_num=2,
        wf=6,
        padding=True,
        batch_norm=True,
        up_mode="upsample",
        with_tanh=False,
        sync_bn=True,
        antialiasing=True,
    )

    ## load model
    checkpoint_path = os.path.join(os.path.dirname(__file__), "checkpoints/detection/FT_Epoch_latest.pt")
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    model.load_state_dict(checkpoint["model_state"])
    print("model weights loaded")

    if config.GPU >= 0:
        model.to(config.GPU)
    else:
        model.cpu()
    model.eval()

    ## dataloader and transformation
    print("directory of testing image: " + config.test_path)
    imagelist = os.listdir(config.test_path)
    imagelist.sort()
    total_iter = 0

    P_matrix = {}
    save_url = os.path.join(config.output_dir)
    mkdir_if_not(save_url)

    input_dir = os.path.join(save_url, "input")
    output_dir = os.path.join(save_url, "mask")
    # blend_output_dir=os.path.join(save_url, 'blend_output')
    mkdir_if_not(input_dir)
    mkdir_if_not(output_dir)
    # mkdir_if_not(blend_output_dir)

    idx = 0

    results = []
    for image_name in imagelist:

        idx += 1

        print("processing", image_name)

        scratch_file = os.path.join(config.test_path, image_name)
        if not os.path.isfile(scratch_file):
            print("Skipping non-file %s" % image_name)
            continue
        scratch_image = Image.open(scratch_file).convert("RGB")
        w, h = scratch_image.size

        transformed_image_PIL = data_transforms(scratch_image, config.input_size)
        scratch_image = transformed_image_PIL.convert("L")
        scratch_image = tv.transforms.ToTensor()(scratch_image)
        scratch_image = tv.transforms.Normalize([0.5], [0.5])(scratch_image)
        scratch_image = torch.unsqueeze(scratch_image, 0)
        _, _, ow, oh = scratch_image.shape
        scratch_image_scale = scale_tensor(scratch_image)

        if config.GPU >= 0:
            scratch_image_scale = scratch_image_scale.to(config.GPU)
        else:
            scratch_image_scale = scratch_image_scale.cpu()
        with torch.no_grad():
            P = torch.sigmoid(model(scratch_image_scale))

        P = P.data.cpu()
        P = F.interpolate(P, [ow, oh], mode="nearest")

        tv.utils.save_image(
            (P >= 0.4).float(),
            os.path.join(
                output_dir,
                image_name[:-4] + ".png",
            ),
            nrow=1,
            padding=0,
            normalize=True,
        )
        transformed_image_PIL.save(os.path.join(input_dir, image_name[:-4] + ".png"))
        gc.collect()
        torch.cuda.empty_cache()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # parser.add_argument('--checkpoint_name', type=str, default="FT_Epoch_latest.pt", help='Checkpoint Name')

    parser.add_argument("--GPU", type=int, default=0)
    parser.add_argument("--test_path", type=str, default=".")
    parser.add_argument("--output_dir", type=str, default=".")
    parser.add_argument("--input_size", type=str, default="scale_256", help="resize_256|full_size|scale_256")
    config = parser.parse_args()

    main(config)

test_scripts/DeScratch_01/test_script.py
ADDED
@@ -0,0 +1,133 @@
#!/usr/bin/env python3
import argparse
import os
import json
import datetime
import numpy as np
from PIL import Image, UnidentifiedImageError
import cv2


def main():
    parser = argparse.ArgumentParser(description='Automated scratch detection test script')
    parser.add_argument(
        '--output',
        required=True,
        help='Path to output image for detection'
    )
    parser.add_argument(
        '--result',
        required=True,
        help='Path to result JSONL file (created if not exists, appended if exists)'
    )
    parser.add_argument(
        '--threshold',
        type=float,
        default=0.05,
        help='Scratch detection threshold, default 0.05'
    )
    parser.add_argument(
        '--min-length',
        type=int,
        default=50,
        help='Minimum scratch length, default 50 pixels'
    )
    args = parser.parse_args()
    process = False
    result = False
    comments = []
    # —— Step 1: Validate input file ——
    if not os.path.isfile(args.output):
        comments.append(f'File not found: {args.output}')
    elif os.path.getsize(args.output) == 0:
        comments.append(f'File is empty: {args.output}')
    else:
        try:
            # Verify format
            img = Image.open(args.output)
            img.verify()
            process = True
            # Reopen to read pixels
            img = Image.open(args.output)
            # Convert to numpy array
            img_array = np.array(img)

            # —— Step 2: Scratch detection logic ——
            # Convert to grayscale
            if len(img_array.shape) == 3:
                gray_img = cv2.cvtColor(img_array, cv2.COLOR_RGB2GRAY)
            else:
                gray_img = img_array

            # Apply Gaussian blur to remove noise
            blurred = cv2.GaussianBlur(gray_img, (5, 5), 0)

            # Use Canny edge detection
            edges = cv2.Canny(blurred, 50, 150)

            # Use Hough transform to detect lines
            lines = cv2.HoughLinesP(edges, 1, np.pi / 180,
                                    threshold=50,
                                    minLineLength=args.min_length,
                                    maxLineGap=10)

            # Calculate scratch features
            if lines is not None:
                scratch_count = len(lines)
                # Calculate cumulative length and average intensity
                total_length = 0
                line_intensities = []

                for line in lines:
                    x1, y1, x2, y2 = line[0]
                    length = np.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2)
                    total_length += length

                    # Calculate average intensity along the line
                    line_points = np.linspace((x1, y1), (x2, y2), int(length), dtype=np.int32)
                    points_intensity = []
                    for x, y in line_points:
                        if 0 <= x < gray_img.shape[1] and 0 <= y < gray_img.shape[0]:
                            points_intensity.append(gray_img[y, x])

                    if points_intensity:
                        line_intensities.append(np.mean(points_intensity))

                # Calculate features
                avg_intensity = np.mean(line_intensities) if line_intensities else 0
                intensity_std = np.std(line_intensities) if line_intensities else 0
                avg_length = total_length / scratch_count if scratch_count > 0 else 0

                # Scratch score - combines line count, length and intensity variation
                scratch_score = (scratch_count * avg_length * intensity_std) / (img_array.size * 255)

                if scratch_score > args.threshold:
                    comments.append(
                        f'Potential scratches detected: {scratch_count} lines, avg length {avg_length:.2f}px, intensity variation {intensity_std:.2f}, score {scratch_score:.6f}, exceeds threshold {args.threshold}')
                    result = False
                else:
                    comments.append(f'No significant scratches detected: score {scratch_score:.6f}, below threshold {args.threshold}')
                    result = True
            else:
                comments.append('No lines detected, no scratches found')
                result = True

        except UnidentifiedImageError as e:
            comments.append(f'Invalid image format: {e}')
        except Exception as e:
            comments.append(f'Error reading image: {e}')
    print("; ".join(comments))
    # —— Step 3: Write to JSONL ——
    entry = {
        "Process": process,
        "Result": result,
        "TimePoint": datetime.datetime.now().isoformat(sep='T', timespec='seconds'),
        "comments": "; ".join(comments)
    }
    # Append mode, one entry per line
    with open(args.result, 'a', encoding='utf-8') as f:
        f.write(json.dumps(entry, ensure_ascii=False, default=str) + "\n")


if __name__ == "__main__":
    main()

test_scripts/DeScratch_02/test_script.py
ADDED
|
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import numpy as np
from PIL import Image
from tqdm import tqdm
import argparse
import json
from datetime import datetime


def evaluate_mask(pred_mask, gt_mask):
    # Convert masks to boolean values, calculate IoU and Dice coefficient
    pred_mask = pred_mask.astype(bool)
    gt_mask = gt_mask.astype(bool)

    intersection = np.logical_and(pred_mask, gt_mask).sum()
    union = np.logical_or(pred_mask, gt_mask).sum()
    iou = intersection / union if union != 0 else 1.0

    dice = (2 * intersection) / (pred_mask.sum() + gt_mask.sum()) if (pred_mask.sum() + gt_mask.sum()) != 0 else 1.0

    return {"IoU": iou, "Dice": dice}


def main(pred_dir, gt_dir, iou_threshold=0.5, dice_threshold=0.6, result_file=None):
    all_metrics = []

    # Initialize Process with default status True (files exist and valid)
    process_result = {"Process": True, "Result": False, "TimePoint": "", "comments": ""}
    process_result["TimePoint"] = datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

    print(f"\nStarting evaluation task:")
    print(f"Predicted masks path: {pred_dir}")
    print(f"Ground truth masks path: {gt_dir}\n")

    # Validate input paths
    if not os.path.exists(pred_dir) or not os.path.exists(gt_dir):
        process_result["Process"] = False
        process_result["comments"] = "Path does not exist"
        print("❌ Predicted or ground truth masks path does not exist")
        save_result(result_file, process_result)
        return

    # Check each file in directory
    for filename in tqdm(os.listdir(gt_dir)):
        # Check if file extension is valid image format
        if not filename.lower().endswith(('.png', '.jpg', '.jpeg')):
            continue

        gt_path = os.path.join(gt_dir, filename)

        # Automatically find predicted filename matching output.*
        pred_filename = next((f for f in os.listdir(pred_dir) if
                              f.startswith('output.') and f.lower().endswith(('.png', '.jpg', '.jpeg'))), None)

        if not pred_filename:
            print(f"⚠️ Missing predicted file: {filename}")
            continue

        pred_path = os.path.join(pred_dir, pred_filename)

        # Read ground truth and predicted masks
        gt_mask = np.array(Image.open(gt_path).convert("L")) > 128
        pred_mask = np.array(Image.open(pred_path).convert("L")) > 128

        # Evaluate and calculate IoU and Dice
        metrics = evaluate_mask(pred_mask, gt_mask)

        # Check if passes evaluation thresholds
        passed = metrics["IoU"] >= iou_threshold and metrics["Dice"] >= dice_threshold
        status = "✅ Passed" if passed else "❌ Failed"

        print(f"{filename:20s} | IoU: {metrics['IoU']:.3f} | Dice: {metrics['Dice']:.3f} | {status}")
        all_metrics.append(metrics)

    # If no files were evaluated, notify user
    if not all_metrics:
        print("\n⚠️ No valid image pairs found for evaluation, please check folder paths.")
        process_result["Process"] = False
        process_result["comments"] = "No valid image pairs for evaluation"
        save_result(result_file, process_result)
        return

    # Calculate average results across all files
    avg_metrics = {k: np.mean([m[k] for m in all_metrics]) for k in all_metrics[0].keys()}
    print("\n📊 Overall average results:")
    print(f"Average IoU: {avg_metrics['IoU']:.3f}")
    print(f"Average Dice: {avg_metrics['Dice']:.3f}")

    # Determine final result
    if avg_metrics["IoU"] >= iou_threshold and avg_metrics["Dice"] >= dice_threshold:
        process_result["Result"] = True
        process_result[
            "comments"] = f"All images passed, average IoU: {avg_metrics['IoU']:.3f}, average Dice: {avg_metrics['Dice']:.3f}"
        print(f"✅ Test passed!")
    else:
        process_result["Result"] = False
        process_result[
            "comments"] = f"Test failed, average IoU: {avg_metrics['IoU']:.3f}, average Dice: {avg_metrics['Dice']:.3f}"
        print(f"❌ Test failed")

    save_result(result_file, process_result)


def save_result(result_file, result):
    # Save test results to jsonl file, append if file exists
    if result_file:
        try:
            with open(result_file, "a", encoding="utf-8") as f:
                f.write(json.dumps(result, default=str) + "\n")
        except Exception as e:
            print(f"⚠️ Error writing result file: {e}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', type=str, required=True, help="Folder containing predicted mask images")
    parser.add_argument('--groundtruth', type=str, required=True, help="Folder containing ground truth mask images")
    parser.add_argument('--result', type=str, required=True, help="Path to jsonl file for storing test results")
    args = parser.parse_args()

    main(args.output, args.groundtruth, result_file=args.result)
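For reference, a toy check of the evaluate_mask helper above on 4x4 boolean masks (values chosen only to illustrate the IoU and Dice formulas, with the helper in scope):

import numpy as np

pred = np.zeros((4, 4), dtype=bool)
gt = np.zeros((4, 4), dtype=bool)
pred[:2, :2] = True   # 4 predicted pixels
gt[:2, :] = True      # 8 ground-truth pixels, 4 of them overlapping
print(evaluate_mask(pred, gt))  # {'IoU': 0.5, 'Dice': 0.666...}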
test_scripts/DeScratch_03/test_script.py ADDED
@@ -0,0 +1,97 @@
import os
import argparse
import cv2
import numpy as np
import json
from datetime import datetime
from skimage.metrics import peak_signal_noise_ratio as psnr
from skimage.metrics import structural_similarity as ssim
import glob

def find_single_image(directory, pattern):
    """Find single image file in specified directory using glob pattern."""
    files = glob.glob(os.path.join(directory, pattern))
    if len(files) == 1:
        return files[0]
    elif len(files) == 0:
        print(f"⚠️ No matching {pattern} image found in {directory}")
    else:
        print(f"⚠️ Multiple matching {pattern} images found in {directory}")
    return None

def evaluate_quality(pred_dir, gt_dir, threshold_ssim=0.65, threshold_psnr=15, result_file=None):
    result = {
        "Process": True,
        "Result": False,
        "TimePoint": datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
        "comments": ""
    }

    print(f"\nStarting evaluation task:")
    print(f"Predicted images path: {pred_dir}")
    print(f"Ground truth images path: {gt_dir}\n")

    if not os.path.exists(pred_dir) or not os.path.exists(gt_dir):
        result["Process"] = False
        result["comments"] = "Path does not exist"
        print("❌ Path does not exist")
        save_result(result_file, result)
        return

    pred_path = find_single_image(pred_dir, "output.*")
    gt_path = find_single_image(gt_dir, "gt.*")

    if not pred_path or not gt_path:
        result["Process"] = False
        result["comments"] = "Predicted or GT image missing or multiple matches"
        save_result(result_file, result)
        return

    pred_img = cv2.imread(pred_path)
    gt_img = cv2.imread(gt_path)

    if pred_img is None or gt_img is None:
        result["Process"] = False
        result["comments"] = "Failed to read images"
        print("⚠️ Failed to read images")
        save_result(result_file, result)
        return

    pred_img = cv2.resize(pred_img, (gt_img.shape[1], gt_img.shape[0]))
    pred_gray = cv2.cvtColor(pred_img, cv2.COLOR_BGR2GRAY)
    gt_gray = cv2.cvtColor(gt_img, cv2.COLOR_BGR2GRAY)

    ssim_val = ssim(gt_gray, pred_gray)
    psnr_val = psnr(gt_gray, pred_gray)

    print(f"Structural Similarity (SSIM): {ssim_val:.4f}")
    print(f"Peak Signal-to-Noise Ratio (PSNR): {psnr_val:.2f}")

    if ssim_val >= threshold_ssim and psnr_val >= threshold_psnr:
        result["Result"] = True
        result["comments"] = f"Test passed, SSIM={ssim_val:.4f}, PSNR={psnr_val:.2f}"
        print("✅ Restoration quality meets requirements")
    else:
        result["Result"] = False
        result["comments"] = f"Test failed, SSIM={ssim_val:.4f}, PSNR={psnr_val:.2f}"
        print("❌ Restoration quality does not meet requirements")

    save_result(result_file, result)

def save_result(result_file, result):
    if result_file:
        try:
            with open(result_file, "a", encoding="utf-8") as f:
                f.write(json.dumps(result, ensure_ascii=False) + "\n")
            print(f"[Success] Output file: {result_file}")
        except Exception as e:
            print(f"⚠️ Failed to write result file: {e}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--output', type=str, required=True, help='Predicted results folder')
    parser.add_argument('--groundtruth', type=str, required=True, help='Original GT folder')
    parser.add_argument('--result', type=str, required=True, help='Output JSONL file for results')
    args = parser.parse_args()

    evaluate_quality(args.output, args.groundtruth, result_file=args.result)
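The SSIM and PSNR calls above come straight from scikit-image; a quick, purely illustrative sanity check on synthetic grayscale arrays (the noise level and array sizes are invented):

import numpy as np
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import peak_signal_noise_ratio as psnr

rng = np.random.default_rng(0)
gt = rng.integers(0, 256, size=(64, 64), dtype=np.uint8)
noisy = np.clip(gt.astype(np.int16) + rng.integers(-10, 11, size=gt.shape), 0, 255).astype(np.uint8)
print(ssim(gt, noisy), psnr(gt, noisy))  # SSIM in [0, 1], PSNR in dB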
test_scripts/Eparse_01/test_script.py ADDED
@@ -0,0 +1,418 @@
import argparse
import csv
import json
import os
import re
from datetime import datetime
from collections import defaultdict


def load_txt_file(file_path):
    """Load TXT file content"""
    try:
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read()
            return content, None
    except Exception as e:
        return "", str(e)


def extract_file_blocks(content):
    """Extract filename and corresponding table data blocks from TXT content"""
    file_blocks = {}
    current_file = None
    current_block = []

    # Attempt to split content using filename markers
    lines = content.strip().split('\n')
    i = 0
    while i < len(lines):
        line = lines[i].strip()

        # Detect filename line - supports multiple possible formats
        file_name_match = None
        if line.startswith("文件名:") or line.startswith("File name:"):
            file_name_match = re.search(r'(?:文件名|File name):?\s*(.+?)(?:\s|$)', line)
        elif "文件名" in line:
            file_name_match = re.search(r'.*文件名:?\s*(.+?)(?:\s|$)', line)
        elif re.match(r'^[^:]*\.xlsx?\s*$', line):  # Case of direct filename.xls(x)
            file_name_match = re.match(r'^([^:]*\.xlsx?)$', line)

        if file_name_match:
            # If we have a current file, save its data block
            if current_file and current_block:
                file_blocks[current_file] = '\n'.join(current_block)

            # Extract new filename
            current_file = file_name_match.group(1).strip()
            current_block = []
        elif current_file is not None:
            # Add to current block
            current_block.append(line)

        i += 1

    # Process last file block
    if current_file and current_block:
        file_blocks[current_file] = '\n'.join(current_block)

    return file_blocks


def parse_table_content(block_content):
    """Parse table block content to extract data structure"""
    sheets_data = []
    current_sheet = []

    lines = block_content.split('\n')
    i = 0
    in_sheet = False

    while i < len(lines):
        line = lines[i].strip()

        # Skip empty lines
        if not line:
            i += 1
            continue

        # Detect Sheet marker
        sheet_match = re.search(r'Sheet:?', line)
        if sheet_match:
            # If new Sheet found, save previous sheet data
            if in_sheet and current_sheet:
                sheets_data.append(current_sheet)
                current_sheet = []

            in_sheet = True
            i += 1
            continue

        # Process data rows
        if in_sheet or not sheets_data:  # Also try parsing if no explicit Sheet markers
            # Clean line numbers
            cleaned_line = re.sub(r'^\d+\s+', '', line)

            # Split cell data
            # First try tab or multiple spaces
            cells = re.split(r'\s{2,}|\t', cleaned_line)

            # If only one element after split, try single space
            if len(cells) <= 1:
                cells = re.split(r'\s+', cleaned_line)

            if cells:
                current_sheet.append(cells)

        i += 1

    # Save last sheet
    if current_sheet:
        sheets_data.append(current_sheet)

    # If no explicit sheet separation but has data, treat as single sheet
    if not sheets_data and lines:
        # Re-parse as single sheet
        current_sheet = []
        for line in lines:
            if line.strip():
                # Clean line numbers
                cleaned_line = re.sub(r'^\d+\s+', '', line)
                # Split cells
                cells = re.split(r'\s{2,}|\t', cleaned_line)
                if len(cells) <= 1:
                    cells = re.split(r'\s+', cleaned_line)
                if cells:
                    current_sheet.append(cells)

        if current_sheet:
            sheets_data.append(current_sheet)

    return sheets_data


def normalize_value(value):
    """Normalize cell value, handling various format differences"""
    # Convert to string and strip whitespace
    value_str = str(value).strip()

    # Standardize date formats
    date_match = re.match(r'(\d{4}[-/]\d{1,2}[-/]\d{1,2})(?:\s+\d{1,2}:\d{1,2}(?::\d{1,2})?)?', value_str)
    if date_match:
        # Extract date part
        date_part = date_match.group(1)
        # Unify separator to -
        date_part = date_part.replace('/', '-')
        # Ensure yyyy-mm-dd format
        date_parts = date_part.split('-')
        if len(date_parts) == 3:
            year = date_parts[0]
            month = date_parts[1].zfill(2)
            day = date_parts[2].zfill(2)
            value_str = f"{year}-{month}-{day}"

    # Standardize number formats
    number_match = re.match(r'^[-+]?\d+(?:\.\d+)?$', value_str)
    if number_match:
        try:
            # Try converting to float, then remove trailing zeros
            num_value = float(value_str)
            # If integer, remove decimal point
            if num_value.is_integer():
                value_str = str(int(num_value))
            else:
                value_str = str(num_value).rstrip('0').rstrip('.')
        except:
            pass

    return value_str


def calculate_sheet_similarity(pred_sheet, truth_sheet):
    """Calculate content similarity between two sheets"""
    # Normalize all cell values
    pred_values = set()
    truth_values = set()

    # Process prediction data
    for row in pred_sheet:
        for cell in row:
            normalized = normalize_value(cell)
            if normalized:  # Ignore empty cells
                pred_values.add(normalized)

    # Process ground truth data
    for row in truth_sheet:
        for cell in row:
            normalized = normalize_value(cell)
            if normalized:  # Ignore empty cells
                truth_values.add(normalized)

    # Calculate intersection and union
    intersection = pred_values.intersection(truth_values)
    union = pred_values.union(truth_values)

    # Jaccard similarity
    if not union:
        return 0.0

    similarity = len(intersection) / len(union) * 100
    return similarity


def evaluate_file_similarity(pred_sheets, truth_sheets):
    """Evaluate file similarity"""
    if not pred_sheets or not truth_sheets:
        return 0.0

    # Calculate similarity for each sheet
    total_similarity = 0.0
    sheet_count = min(len(pred_sheets), len(truth_sheets))

    for i in range(sheet_count):
        sheet_similarity = calculate_sheet_similarity(
            pred_sheets[i],
            truth_sheets[i]
        )
        total_similarity += sheet_similarity

    # Average similarity
    avg_similarity = total_similarity / sheet_count if sheet_count > 0 else 0.0

    # Reduce similarity if sheet counts differ
    sheet_diff_penalty = abs(len(pred_sheets) - len(truth_sheets)) * 5  # 5% reduction per extra/missing sheet
    final_similarity = max(0, avg_similarity - sheet_diff_penalty)

    return final_similarity


def evaluate(pred_file, truth_file):
    """Evaluation function"""
    pred_content, pred_err = load_txt_file(pred_file)
    truth_content, truth_err = load_txt_file(truth_file)

    process_ok = True
    comments = []

    # Read error checking
    if pred_err:
        comments.append(f"[Prediction file read error] {pred_err}")
        process_ok = False
    if truth_err:
        comments.append(f"[GT file read error] {truth_err}")
        process_ok = False
    if not process_ok:
        return {
            "Process": False,
            "Result": False,
            "TimePoint": datetime.now().isoformat(),
            "comments": "\n".join(comments)
        }

    # Extract file blocks
    pred_file_blocks = extract_file_blocks(pred_content)
    truth_file_blocks = extract_file_blocks(truth_content)

    # Filename comparison
    pred_files = set(pred_file_blocks.keys())
    truth_files = set(truth_file_blocks.keys())

    comments.append(f"Prediction file contains {len(pred_files)} Excel data blocks")
    comments.append(f"GT file contains {len(truth_files)} Excel data blocks")

    if len(pred_files) == 0:
        comments.append("⚠️ No Excel data blocks found in prediction file!")
        return {
            "Process": True,
            "Result": False,
            "TimePoint": datetime.now().isoformat(),
            "comments": "\n".join(comments)
        }

    if len(truth_files) == 0:
        comments.append("⚠️ No Excel data blocks found in GT file!")
        return {
            "Process": True,
            "Result": False,
            "TimePoint": datetime.now().isoformat(),
            "comments": "\n".join(comments)
        }

    # Calculate file matching
    common_files = pred_files.intersection(truth_files)
    missing_files = truth_files - pred_files
    extra_files = pred_files - truth_files

    # Filename match rate
    file_match_rate = len(common_files) / len(truth_files) * 100 if truth_files else 0
    comments.append(f"Filename match rate: {file_match_rate:.2f}%")

    if missing_files:
        comments.append(f"Missing files: {', '.join(missing_files)}")
    if extra_files:
        comments.append(f"Extra files: {', '.join(extra_files)}")

    # Content similarity scoring
    total_similarity = 0.0
    file_count = 0

    # Process exactly matched filenames
    for file_name in common_files:
        pred_content_block = pred_file_blocks[file_name]
        truth_content_block = truth_file_blocks[file_name]

        # Parse table content
        pred_sheets = parse_table_content(pred_content_block)
        truth_sheets = parse_table_content(truth_content_block)

        # Calculate file similarity
        file_similarity = evaluate_file_similarity(pred_sheets, truth_sheets)

        comments.append(f"File '{file_name}' content similarity: {file_similarity:.2f}%")
        total_similarity += file_similarity
        file_count += 1

    # Attempt to match files with slightly different names by content
    if missing_files and extra_files:
        for missing_file in list(missing_files):
            best_match = None
            best_similarity = 0

            truth_content_block = truth_file_blocks[missing_file]
            truth_sheets = parse_table_content(truth_content_block)

            for extra_file in list(extra_files):
                pred_content_block = pred_file_blocks[extra_file]
                pred_sheets = parse_table_content(pred_content_block)

                similarity = evaluate_file_similarity(pred_sheets, truth_sheets)
                if similarity > best_similarity and similarity > 60:  # Require at least 60% similarity
                    best_similarity = similarity
                    best_match = extra_file

            if best_match:
                comments.append(
                    f"Different filenames but similar content: '{missing_file}' and '{best_match}', similarity: {best_similarity:.2f}%")
                total_similarity += best_similarity
                file_count += 1
                missing_files.remove(missing_file)
                extra_files.remove(best_match)

    # Calculate average file similarity
    avg_similarity = total_similarity / len(truth_files) if truth_files else 0

    # Penalize for missing files
    if missing_files:
        missing_penalty = len(missing_files) / len(truth_files) * 100
        comments.append(f"Score reduction for missing files: -{missing_penalty:.2f}%")
        avg_similarity = max(0, avg_similarity - missing_penalty)

    comments.append(f"Average content similarity across all files: {avg_similarity:.2f}%")

    # File match and content similarity weights
    file_match_weight = 0.3
    content_similarity_weight = 0.7

    # Final score
    final_score = (file_match_rate * file_match_weight + avg_similarity * content_similarity_weight)
    passed = final_score >= 75

    comments.append(
        f"Final score (filename match {file_match_weight * 100}% + content similarity {content_similarity_weight * 100}%): {final_score:.2f}% (threshold=75%)")

    if passed:
        comments.append("✅ Test passed!")
    else:
        comments.append("❌ Test failed!")

    # If structured parsing fails, try basic content comparison
    if file_count == 0:
        comments.append("Note: No matches found in structured parsing, attempting basic content comparison")

        # Calculate word overlap in full content
        pred_words = set(re.findall(r'\b\w+\b', pred_content.lower()))
        truth_words = set(re.findall(r'\b\w+\b', truth_content.lower()))

        common_words = pred_words.intersection(truth_words)
        all_words = pred_words.union(truth_words)

        if all_words:
            word_overlap = len(common_words) / len(all_words) * 100
            comments.append(f"Content word overlap: {word_overlap:.2f}%")

            # Pass if high word overlap
            if word_overlap >= 75:
                comments.append("Based on word overlap: ✅ Test passed!")
                return {
                    "Process": True,
                    "Result": True,
                    "TimePoint": datetime.now().isoformat(),
                    "comments": "\n".join(comments)
                }

    return {
        "Process": True,
        "Result": passed,
        "TimePoint": datetime.now().isoformat(),
        "comments": "\n".join(comments)
    }


def append_result_to_jsonl(result_path, result_dict):
    os.makedirs(os.path.dirname(result_path) or '.', exist_ok=True)
    with open(result_path, "a", encoding="utf-8") as f:
        json.dump(result_dict, f, ensure_ascii=False, default=str)
        f.write("\n")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--output", type=str, required=True, help="Path to extracted tables")
    parser.add_argument("--groundtruth", type=str, required=True, help="Path to standard tables")
    parser.add_argument("--result", type=str, required=True, help="Output JSONL file path for results")
    args = parser.parse_args()

    result_dict = evaluate(args.output, args.groundtruth)
    append_result_to_jsonl(args.result, result_dict)
    print("[Evaluation complete] Result summary:")
    print(json.dumps(result_dict, ensure_ascii=False, indent=2, default=str))
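A few illustrative calls to the normalize_value helper above (inputs are made up; the outputs follow directly from its date and number handling):

print(normalize_value("2023/7/5 12:30"))  # "2023-07-05" – date unified to yyyy-mm-dd
print(normalize_value("12.50"))           # "12.5"  – trailing zero dropped by float round-trip
print(normalize_value("42.0"))            # "42"    – integral float collapsed to an int string
print(normalize_value("  Total  "))       # "Total" – surrounding whitespace removed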
test_scripts/Eparse_02/test_script.py ADDED
@@ -0,0 +1,360 @@
import argparse
import json
import os
import re
from datetime import datetime
from ast import literal_eval
import traceback


def normalize_json_item(item):
    """
    Normalize JSON objects to remove format differences
    """
    if isinstance(item, str):
        # Try parsing string as dictionary
        try:
            item = json.loads(item)
        except:
            try:
                # Try parsing non-standard JSON with ast.literal_eval
                item = literal_eval(item)
            except:
                # Return original string if parsing fails
                return item

    # Ensure all keys are strings
    result = {}
    for k, v in item.items():
        # Convert key to string and strip whitespace
        key = str(k).strip().lower()
        # Process value
        if isinstance(v, str):
            # Strip whitespace and lowercase string values (except for type field)
            if key == 'type':
                value = v.strip()  # Preserve case for type field
            else:
                value = v.strip().lower()
        else:
            value = v
        result[key] = value

    return result


def parse_gt_format(content):
    """
    Special parser for gt.txt format
    Features multi-line dictionaries separated by commas
    """
    items = []
    # Replace all newlines to make content single line
    content = content.replace('\n', ' ')

    # Try parsing as list
    try:
        # If content is wrapped in brackets, remove them
        if content.strip().startswith('[') and content.strip().endswith(']'):
            content = content.strip()[1:-1]

        # Split multiple dictionaries (each starts with { and ends with }, or })
        dict_pattern = r'\{[^{}]*\}'
        dict_matches = re.findall(dict_pattern, content)

        for dict_str in dict_matches:
            try:
                item = literal_eval(dict_str)
                items.append(normalize_json_item(item))
            except Exception as e:
                print(f"Failed to parse dictionary: {dict_str[:50]}... Error: {e}")
    except Exception as e:
        print(f"Failed to parse gt format: {e}")

    print(f"Parsed {len(items)} items from gt format")
    return items


def parse_json_line_format(content):
    """
    Parse format with one JSON object per line
    """
    items = []
    lines = content.split('\n')

    for line in lines:
        line = line.strip()
        if not line:
            continue

        try:
            # Try parsing as JSON object
            item = json.loads(line)
            items.append(normalize_json_item(item))
        except:
            try:
                # Try parsing non-standard JSON with ast.literal_eval
                item = literal_eval(line)
                items.append(normalize_json_item(item))
            except Exception as e:
                print(f"Unable to parse line: {line[:50]}... Error: {e}")

    print(f"Parsed {len(items)} items from JSON line format")
    return items


def load_json_items(file_path):
    """
    Read file containing JSON objects, return normalized list
    Supports multiple formats: JSON per line, single JSON array, list of dicts, multi-line dict format
    """
    print(f"Parsing file: {file_path}")
    items = []

    try:
        with open(file_path, "r", encoding="utf-8") as f:
            content = f.read().strip()

        # Check if file is empty
        if not content:
            print("File is empty")
            return items

        # 1. First try gt.txt specific format
        if "{'c_header':" in content or '{"c_header":' in content or "'c_header':" in content:
            print("Detected gt.txt format, using specialized parser")
            items = parse_gt_format(content)
            if items:
                return items

        # 2. Try parsing as JSON array
        if content.startswith('[') and content.endswith(']'):
            try:
                array_items = json.loads(content)
                for item in array_items:
                    items.append(normalize_json_item(item))
                print(f"Parsed {len(items)} items from JSON array")
                return items
            except Exception as e:
                print(f"JSON array parsing failed: {e}")

            # Try Python list literal
            try:
                array_items = literal_eval(content)
                for item in array_items:
                    items.append(normalize_json_item(item))
                print(f"Parsed {len(items)} items from Python list")
                return items
            except Exception as e:
                print(f"Python list parsing failed: {e}")

        # 3. Try parsing JSON objects line by line
        items = parse_json_line_format(content)
        if items:
            return items

        # 4. Finally try extracting all possible dictionaries
        print("Attempting to extract all possible dictionaries...")
        dict_pattern = r'\{[^{}]*\}'
        dicts = re.findall(dict_pattern, content)
        for d in dicts:
            try:
                item = literal_eval(d)
                items.append(normalize_json_item(item))
            except Exception as e:
                print(f"Dictionary extraction failed: {d[:30]}... Error: {e}")

        if items:
            print(f"Parsed {len(items)} items from dictionary extraction")
            return items

    except Exception as e:
        print(f"Error reading file: {e}")
        traceback.print_exc()

    return items


def compare_items(pred_items, gt_items):
    """
    Compare predicted items with ground truth items, calculate match rate for key fields
    """
    if not pred_items or not gt_items:
        return 0, "No valid items to compare"

    print(f"Comparing {len(pred_items)} predicted items with {len(gt_items)} ground truth items")

    # List of key fields we consider more important
    key_fields = ['value', 'row', 'column', 'excel_rc', 'c_header', 'r_header', 'sheet', 'f_name']

    total_matches = 0
    total_fields = 0
    missing_items = 0

    # Number of items to match (based on smaller set)
    expected_matches = min(len(pred_items), len(gt_items))

    # If predicted items are fewer than ground truth, record missing count
    if len(pred_items) < len(gt_items):
        missing_items = len(gt_items) - len(pred_items)

    # Print sample data for debugging
    print("Predicted items sample:")
    for i, item in enumerate(pred_items[:2]):
        print(f"  Item {i}: {item}")

    print("Ground truth items sample:")
    for i, item in enumerate(gt_items[:2]):
        print(f"  Item {i}: {item}")

    # Compare item by item
    for i in range(min(len(pred_items), len(gt_items))):
        pred_item = pred_items[i]
        gt_item = gt_items[i]

        item_matches = 0
        item_fields = 0

        # Compare key fields
        for field in key_fields:
            # For predicted item, try to match different case variations
            pred_fields = [f.lower() for f in pred_item.keys()]
            pred_field_key = None

            # Find matching field (case insensitive)
            if field in pred_item:
                pred_field_key = field
            else:
                for k in pred_item.keys():
                    if k.lower() == field.lower():
                        pred_field_key = k
                        break

            # For ground truth item, also try to match different case variations
            gt_field_key = None
            if field in gt_item:
                gt_field_key = field
            else:
                for k in gt_item.keys():
                    if k.lower() == field.lower():
                        gt_field_key = k
                        break

            # If both have the field, compare
            if pred_field_key is not None and gt_field_key is not None:
                item_fields += 1
                pred_value = pred_item[pred_field_key]
                gt_value = gt_item[gt_field_key]

                # Case insensitive comparison for excel_rc field
                if field.lower() == 'excel_rc':
                    if str(pred_value).upper() == str(gt_value).upper():
                        item_matches += 1
                # Numeric comparison for numeric fields
                elif field.lower() in ['row', 'column'] and isinstance(pred_value, (int, float)) and isinstance(
                        gt_value, (int, float)):
                    if pred_value == gt_value:
                        item_matches += 1
                # Special handling for type field
                elif field.lower() == 'type':
                    # Extract type name, ignoring quotes and class parts
                    pred_type = re.sub(r"[<>'\"]|class\s+", "", str(pred_value)).strip()
                    gt_type = re.sub(r"[<>'\"]|class\s+", "", str(gt_value)).strip()
                    if pred_type == gt_type:
                        item_matches += 1
                # Standardized comparison for other fields
                elif str(pred_value).lower() == str(gt_value).lower():
                    item_matches += 1

        # Add to total matches
        if item_fields > 0:
            total_matches += item_matches
            total_fields += item_fields

    # Calculate match rate
    if total_fields == 0:
        accuracy = 0
        details = "No valid fields found for comparison"
    else:
        accuracy = total_matches / total_fields
        details = f"Matched fields: {total_matches}/{total_fields}"

    # Apply penalty if missing items
    if missing_items > 0:
        penalty = min(0.2, missing_items / len(gt_items) * 0.5)  # Max 20% penalty
        accuracy = max(0, accuracy - penalty)
        details += f", Missing items: {missing_items}, applied {penalty:.2f} penalty"

    return accuracy, details


def evaluate(pred_path, gt_path):
    """Evaluate based on parsed content"""
    threshold = 0.70  # Fixed threshold

    # Load and normalize data
    pred_items = load_json_items(pred_path)
    gt_items = load_json_items(gt_path)

    # Initialize result dictionary
    result = {
        "Process": True,
        "Result": False,
        "TimePoint": datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
        "comments": ""
    }

    # Check if input files are valid
    if not pred_items:
        result["Process"] = False
        result["comments"] = "Predicted file parsed as empty, cannot evaluate!"
        return result

    if not gt_items:
        result["Process"] = False
        result["comments"] = "❌ Ground truth parsed as empty!"
        result["Result"] = False
        return result

    # Compare items and calculate match rate
    accuracy, details = compare_items(pred_items, gt_items)

    # Determine result based on accuracy
    if accuracy >= threshold:
        result["Result"] = True
        result["comments"] = f"✅ Test passed! Content match rate={accuracy:.4f} ≥ {threshold}. {details}"
    else:
        result["Result"] = False
        result["comments"] = f"❌ Test failed. Content match rate={accuracy:.4f} < {threshold}. {details}"

    print(result["comments"])
    return result


def save_result(result, result_file):
    """Save results to jsonl file"""
    # Ensure directory exists
    os.makedirs(os.path.dirname(result_file) or '.', exist_ok=True)

    # Write in append mode, ensuring each record is on separate line
    with open(result_file, "a", encoding="utf-8") as f:
        f.write(json.dumps(result, ensure_ascii=False) + "\n")


def main():
    parser = argparse.ArgumentParser(
        description="Evaluate Excel cell parsing output by content similarity and save results")
    parser.add_argument("--output", required=True, help="Predicted output file path")
    parser.add_argument("--groundtruth", required=True, help="Ground truth file path")
    parser.add_argument("--result", required=True, help="Result output file path (JSONL format)")
    args = parser.parse_args()

    # Get evaluation result
    result = evaluate(args.output, args.groundtruth)

    # Save result to specified jsonl file
    save_result(result, args.result)
    print(f"Results saved to {args.result}")


if __name__ == "__main__":
    main()
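As a rough illustration of the key/value normalisation above, a single made-up cell record behaves like this (sample dict invented for the example, with the helper in scope):

sample = {"Excel_RC": "B2", "Value": "  Widget A ", "type": "<class 'str'>"}
print(normalize_json_item(sample))
# {'excel_rc': 'b2', 'value': 'widget a', 'type': "<class 'str'>"}
# keys are lower-cased, string values stripped and lower-cased, 'type' keeps its case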
test_scripts/Eparse_03/test_script.py ADDED
@@ -0,0 +1,224 @@
import argparse
import json
import os
import datetime
import re
from collections import defaultdict


def normalize_value(value):
    """Standardize values to handle consistency between numbers and strings"""
    if value is None:
        return "null"
    if isinstance(value, (int, float)):
        return str(value)
    return str(value).strip()


def extract_cell_data(json_data):
    """Extract cell data from different JSON structures, returning standardized cell collections"""
    cells = []

    # Handle GT format
    if isinstance(json_data, dict) and "sheets" in json_data:
        for sheet_name, sheet_data in json_data["sheets"].items():
            for row in sheet_data.get("rows", []):
                row_index = row.get("row_index", 0)
                for cell in row.get("cells", []):
                    cells.append({
                        "row": row_index,
                        "column": cell.get("column_index"),
                        "value": normalize_value(cell.get("value")),
                        "excel_RC": cell.get("excel_RC"),
                        "c_header": cell.get("c_header"),
                        "r_header": cell.get("r_header"),
                        "type": cell.get("type")
                    })

    # Handle Agent output format
    elif isinstance(json_data, list):
        for table in json_data:
            for cell in table.get("data", []):
                cells.append({
                    "row": cell.get("row"),
                    "column": cell.get("column"),
                    "value": normalize_value(cell.get("value")),
                    "excel_RC": cell.get("excel_RC"),
                    "c_header": cell.get("c_header"),
                    "r_header": cell.get("r_header"),
                    "type": cell.get("type")
                })

    # Handle other possible formats
    else:
        try:
            # Attempt recursive search for all cell-like data structures
            def find_cells(obj, path=""):
                if isinstance(obj, dict):
                    # Check if it's a cell structure
                    if all(k in obj for k in ["row", "column", "value"]) or \
                            all(k in obj for k in ["row_index", "column_index", "value"]):
                        row = obj.get("row", obj.get("row_index", 0))
                        column = obj.get("column", obj.get("column_index", 0))
                        cells.append({
                            "row": row,
                            "column": column,
                            "value": normalize_value(obj.get("value")),
                            "excel_RC": obj.get("excel_RC", f"{chr(65 + column)}{row + 1}"),
                            "c_header": obj.get("c_header", str(column + 1)),
                            "r_header": obj.get("r_header", str(row + 1)),
                            "type": obj.get("type", "")
                        })
                    else:
                        for k, v in obj.items():
                            find_cells(v, f"{path}.{k}" if path else k)
                elif isinstance(obj, list):
                    for i, item in enumerate(obj):
                        find_cells(item, f"{path}[{i}]")

            find_cells(json_data)
        except Exception as e:
            print(f"Failed to parse JSON structure: {str(e)}")

    return cells


def compute_cell_similarity(cells1, cells2):
    """Calculate similarity between two cell collections"""
    if not cells1 and not cells2:
        return 1.0  # Both empty, consider perfect match

    if not cells1 or not cells2:
        return 0.0  # One empty, the other not

    # Create position-to-cell mappings
    cells1_dict = {(cell["row"], cell["column"]): cell for cell in cells1}
    cells2_dict = {(cell["row"], cell["column"]): cell for cell in cells2}

    # Get all unique cell positions
    all_positions = set(cells1_dict.keys()) | set(cells2_dict.keys())

    # Calculate matched cell count
    matches = 0
    total_weight = 0

    for pos in all_positions:
        weight = 1  # Cell weight
        total_weight += weight

        if pos in cells1_dict and pos in cells2_dict:
            cell1 = cells1_dict[pos]
            cell2 = cells2_dict[pos]

            # Compare values
            if normalize_value(cell1["value"]) == normalize_value(cell2["value"]):
                matches += 0.6 * weight  # Value match contributes 60% weight

            # Compare other metadata (10% weight each)
            for key in ["excel_RC", "c_header", "r_header", "type"]:
                if key in cell1 and key in cell2 and normalize_value(cell1[key]) == normalize_value(cell2[key]):
                    matches += 0.1 * weight

    similarity = matches / total_weight if total_weight > 0 else 0
    return similarity


def is_valid_file(file_path):
    """Check if file exists and is not empty"""
    return os.path.isfile(file_path) and os.path.getsize(file_path) > 0


def evaluate(pred_file, gt_file):
    """Read files and calculate similarity"""
    try:
        with open(pred_file, 'r', encoding='utf-8') as f_pred:
            pred_text = f_pred.read()
        with open(gt_file, 'r', encoding='utf-8') as f_gt:
            gt_text = f_gt.read()
    except Exception as e:
        return None, f"❌ File read error: {str(e)}"

    try:
        # Parse JSON
        pred_json = json.loads(pred_text)
        gt_json = json.loads(gt_text)

        # Extract cell data
        pred_cells = extract_cell_data(pred_json)
        gt_cells = extract_cell_data(gt_json)

        # Calculate cell content similarity
        similarity = compute_cell_similarity(pred_cells, gt_cells)

        return similarity, f"✅ Similarity calculation complete: {similarity:.4f} ({len(gt_cells)} GT cells, {len(pred_cells)} predicted cells)"
    except json.JSONDecodeError:
        # Fallback to Levenshtein similarity if JSON parsing fails
        try:
            import Levenshtein
            levenshtein_distance = Levenshtein.distance(pred_text, gt_text)
            similarity = 1 - levenshtein_distance / max(len(pred_text), len(gt_text))
            return similarity, f"⚠️ JSON parsing failed, using Levenshtein similarity: {similarity:.4f}"
        except ImportError:
            return 0.0, "❌ JSON parsing failed and Levenshtein library unavailable"
    except Exception as e:
        return 0.0, f"❌ Evaluation process error: {str(e)}"


def save_result(result_path, data):
    """Save results to jsonl file"""
    os.makedirs(os.path.dirname(result_path) or '.', exist_ok=True)
    with open(result_path, "a", encoding="utf-8") as f:
        f.write(json.dumps(data, ensure_ascii=False, default=str) + "\n")


def main():
    parser = argparse.ArgumentParser(
        description="Compare similarity between predicted and ground truth Excel JSON data.")
    parser.add_argument('--output', required=True, help="Path to predicted JSON file.")
    parser.add_argument('--groundtruth', required=True, help="Path to ground truth JSON file.")
    parser.add_argument('--result', required=True, help="Path to save evaluation results (.jsonl).")
    args = parser.parse_args()

    result_dict = {
        "Process": False,
        "Result": False,
        "TimePoint": datetime.datetime.now().isoformat(),
        "comments": ""
    }

    # Step 1: Check if files exist and are not empty
    if not is_valid_file(args.output):
        result_dict["comments"] = "❌ Prediction file does not exist or is empty"
        save_result(args.result, result_dict)
        print(result_dict["comments"])
        return

    if not is_valid_file(args.groundtruth):
        result_dict["comments"] = "❌ GT file does not exist or is empty"
        save_result(args.result, result_dict)
        print(result_dict["comments"])
        return

    result_dict["Process"] = True

    # Step 2: Evaluate similarity
    similarity, msg = evaluate(args.output, args.groundtruth)
    if similarity is None:
        result_dict["comments"] = msg
        save_result(args.result, result_dict)
        print(msg)
        return

    result_dict["comments"] = msg
    result_dict["Result"] = similarity >= 0.8
    save_result(args.result, result_dict)

    print(msg)
    if result_dict["Result"]:
        print("✅ Passed: Similarity ≥ 0.8")
    else:
        print("❌ Failed: Similarity < 0.8")


if __name__ == "__main__":
    main()
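A small worked case for the weighted cell comparison above (toy cell dicts, not real spreadsheet data, with compute_cell_similarity in scope):

a = [{"row": 0, "column": 0, "value": "10", "excel_RC": "A1", "c_header": "Qty", "r_header": "1", "type": "int"}]
b = [{"row": 0, "column": 0, "value": "10", "excel_RC": "A1", "c_header": "Qty", "r_header": "1", "type": "int"}]
print(compute_cell_similarity(a, b))  # 1.0 – value match (0.6) plus four metadata matches (4 x 0.1)
b[0]["value"] = "11"
print(compute_cell_similarity(a, b))  # 0.4 – only the metadata weights remain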
test_scripts/Faker_01/test_script.py ADDED
@@ -0,0 +1,78 @@
import csv
import re
import argparse
import os
import json
from datetime import datetime

def validate_fake_users(file_path):
    email_pattern = r'^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$'
    comments = []
    process_ok = True
    result_ok = False

    if not os.path.isfile(file_path):
        comments.append(f"[Error] File does not exist: {file_path}")
        process_ok = False
    elif os.path.getsize(file_path) == 0:
        comments.append(f"[Error] File is empty: {file_path}")
        process_ok = False

    if process_ok:
        try:
            with open(file_path, 'r', encoding='utf-8') as file:
                reader = csv.DictReader(file)
                fieldnames = reader.fieldnames or []

                if 'Username' not in fieldnames or 'Email' not in fieldnames:
                    comments.append(f"[Error] Required columns missing. Found: {fieldnames}")
                    process_ok = False
                else:
                    row_count = 0
                    for idx, row in enumerate(reader):
                        email = row.get('Email', '')
                        username = row.get('Username', '')
                        row_count += 1
                        if not re.match(email_pattern, email):
                            comments.append(f"[Row {idx+2}] Invalid email: {email}")
                            process_ok = False
                            break
                        if not username.strip():
                            comments.append(f"[Row {idx+2}] Empty Username field")
                            process_ok = False
                            break

                    if process_ok:
                        if row_count != 100:
                            comments.append(f"[Error] Expected 100 rows, but found {row_count}")
                            process_ok = False
                        else:
                            comments.append("All user data validated successfully!")
                            result_ok = True

        except Exception as e:
            comments.append(f"[Exception] File parsing error: {str(e)}")
            process_ok = False

    return {
        "Process": process_ok,
        "Result": result_ok,
        "TimePoint": datetime.now().isoformat(),
        "comments": " | ".join(comments)
    }


def main():
    parser = argparse.ArgumentParser(description="Validate generated fake user data")
    parser.add_argument('--output', type=str, required=True, help="CSV file path")
    parser.add_argument('--result', type=str, required=True, help="Result output JSONL path")
    args = parser.parse_args()

    result_record = validate_fake_users(args.output)

    os.makedirs(os.path.dirname(args.result), exist_ok=True)
    with open(args.result, 'a', encoding='utf-8') as f:
        f.write(json.dumps(result_record, ensure_ascii=False, default=str) + '\n')

if __name__ == "__main__":
    main()
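Note: the generator under test is not part of this commit; a minimal sketch, assuming the standard faker package API and a hypothetical output path, of a script that would produce the 100-row Username/Email CSV the validator above expects:

import csv
from faker import Faker

fake = Faker()
with open('fake_users.csv', 'w', newline='', encoding='utf-8') as f:  # hypothetical path
    writer = csv.DictWriter(f, fieldnames=['Username', 'Email'])
    writer.writeheader()
    for _ in range(100):  # the validator requires exactly 100 data rows
        writer.writerow({'Username': fake.user_name(), 'Email': fake.email()})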
test_scripts/Faker_02/test_script.py
ADDED
@@ -0,0 +1,118 @@
import csv
import argparse
import json
from datetime import datetime
import os


def validate_fake_companies(file_path, expected_num_companies=5, result_file=None):
    if not file_path.lower().endswith('.csv'):
        msg = f"Error: File {file_path} is not a CSV file."
        print(msg)
        if result_file:
            record_result(result_file, False, msg, process_success=False)
        return False

    try:
        with open(file_path, 'r', encoding='utf-8') as file:
            reader = csv.DictReader(file)
            rows = list(reader)
    except Exception as e:
        msg = f"Error: Unable to read file {file_path}, reason: {str(e)}"
        print(msg)
        if result_file:
            record_result(result_file, False, msg, process_success=False)
        return False

    if not rows:
        msg = "Error: CSV file is empty or contains no valid data rows."
        print(msg)
        if result_file:
            record_result(result_file, False, msg, process_success=True)
        return False

    if len(rows) != expected_num_companies:
        msg = f"Error: Expected {expected_num_companies} records, but got {len(rows)}."
        print(msg)
        if result_file:
            record_result(result_file, False, msg, process_success=True)
        return False

    required_fields = ['company name', 'address', 'phone']
    for idx, row in enumerate(rows, 1):
        normalized_row = {k.lower().strip(): v.strip() for k, v in row.items()}

        for field in required_fields:
            if field not in normalized_row:
                msg = f"Error: Row {idx} is missing field '{field}'."
                print(msg)
                if result_file:
                    record_result(result_file, False, msg, process_success=True)
                return False

            value = normalized_row[field]
            if not value:
                msg = f"Error: Row {idx} has empty value for field '{field}'."
                print(msg)
                if result_file:
                    record_result(result_file, False, msg, process_success=True)
                return False

            # Basic content checks
            if field == "phone" and not any(char.isdigit() for char in value):
                msg = f"Error: Row {idx} field 'Phone' should contain digits."
                print(msg)
                if result_file:
                    record_result(result_file, False, msg, process_success=True)
                return False
            if field == "company name" and value.isdigit():
                msg = f"Error: Row {idx} field 'Company Name' should not be all digits."
                print(msg)
                if result_file:
                    record_result(result_file, False, msg, process_success=True)
                return False
            if field == "address" and len(value) < 5:
                msg = f"Error: Row {idx} field 'Address' seems too short to be valid."
                print(msg)
                if result_file:
                    record_result(result_file, False, msg, process_success=True)
                return False

    success_msg = f"All {expected_num_companies} company records passed structural and content checks."
    print(success_msg)
    if result_file:
        record_result(result_file, True, success_msg, process_success=True)
    return True


def record_result(result_file, result, comments, process_success=True):
    # Get current timestamp
    time_point = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')

    # Build result dictionary
    result_data = {
        "Process": process_success,
        "Result": result,
        "TimePoint": time_point,
        "comments": comments
    }

    # Write to jsonl file
    try:
        # Create file if it doesn't exist and append result
        with open(result_file, 'a', encoding='utf-8') as file:
            json.dump(result_data, file, ensure_ascii=False, default=str)
            file.write('\n')  # Write each result on new line
    except Exception as e:
        print(f"Error: Unable to write to result file {result_file}, reason: {str(e)}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Validate generated fake company data")
    parser.add_argument('--output', type=str, required=True, help="Path to generated CSV file")
    parser.add_argument('--result', type=str, required=False, help="Path to save results in jsonl format",
                        default="test_results.jsonl")
    args = parser.parse_args()

    # Execute validation
    validate_fake_companies(args.output, result_file=args.result)
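Note: the validator above normalizes header names before checking required fields, so "Company Name", "company name" and " ADDRESS " all resolve to the same keys; a quick illustration of that normalization step with made-up row values:

row = {'Company Name': 'Acme Ltd ', ' ADDRESS': '1 Main Street', 'Phone': '555-0100'}
normalized_row = {k.lower().strip(): v.strip() for k, v in row.items()}
print(normalized_row['company name'])  # 'Acme Ltd'
print('address' in normalized_row)     # True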
test_scripts/Faker_03/test_script.py
ADDED
@@ -0,0 +1,112 @@
import argparse
import re
import json
from datetime import datetime
import os


def jaccard_similarity_tokens(str1, str2):
    # Token-based Jaccard similarity
    tokens1 = set(re.findall(r'\w+', str1.lower()))
    tokens2 = set(re.findall(r'\w+', str2.lower()))
    if not tokens1 or not tokens2:
        return 0.0
    intersection = len(tokens1 & tokens2)
    union = len(tokens1 | tokens2)
    return intersection / union


def validate_fake_text(input_file, output_file, result_file=None):
    try:
        with open(input_file, 'r', encoding='utf-8') as file:
            original_content = file.read()
    except Exception as e:
        msg = f"Error: Unable to read original text file {input_file}, reason: {str(e)}"
        print(msg)
        if result_file:
            record_result(result_file, False, msg)
        return False

    try:
        with open(output_file, 'r', encoding='utf-8') as file:
            fake_content = file.read()
    except Exception as e:
        msg = f"Error: Unable to read fake text file {output_file}, reason: {str(e)}"
        print(msg)
        if result_file:
            record_result(result_file, False, msg)
        return False

    # Early checks
    if original_content.strip() == fake_content.strip():
        msg = "Error: Output text is identical to input text. No replacement performed."
        print(msg)
        if result_file:
            record_result(result_file, False, msg)
        return False

    if len(fake_content.strip()) < 20:
        msg = "Error: Output fake text is too short to be valid replacement."
        print(msg)
        if result_file:
            record_result(result_file, False, msg)
        return False

    # Token-based similarity
    similarity = jaccard_similarity_tokens(original_content, fake_content)
    print(f"Token-based Jaccard similarity: {similarity:.4f}")

    threshold = 0.3  # Token-level threshold

    if similarity < threshold:
        result_message = f"✅ Fake text replacement successful. Similarity {similarity:.4f} below threshold."
        print(result_message)
        if result_file:
            record_result(result_file, True, result_message)
        return True
    else:
        result_message = f"❌ Fake text too similar to original. Similarity {similarity:.4f} above threshold."
        print(result_message)
        if result_file:
            record_result(result_file, False, result_message)
        return False


def record_result(result_file, result, comments):
    # Get current timestamp
    time_point = datetime.now().strftime('%Y-%m-%dT%H:%M:%S')

    # Build result dictionary
    result_data = {
        "Process": True,
        "Result": result,
        "TimePoint": time_point,
        "comments": comments
    }

    # Write to jsonl file
    try:
        # Create file if it doesn't exist and write
        file_exists = os.path.exists(result_file)
        with open(result_file, 'a', encoding='utf-8') as file:
            if not file_exists:
                # Write empty json line if file doesn't exist (optional)
                file.write('\n')
            json.dump(result_data, file, ensure_ascii=False, default=str)
            file.write('\n')
    except Exception as e:
        print(f"Error: Unable to write to result file {result_file}, reason: {str(e)}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Validate effectiveness of fake text replacement")
    parser.add_argument("--groundtruth", type=str, required=True, help="Path to original text file")
    parser.add_argument("--output", type=str, required=True, help="Path to generated fake text file")
    parser.add_argument("--result", type=str, required=False, help="Path to save results in jsonl format",
                        default="test_results.jsonl")

    args = parser.parse_args()

    # Call test function
    validate_fake_text(args.groundtruth, args.output, result_file=args.result)
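Note: the pass criterion above is token-level Jaccard similarity (size of intersection over size of union of the lower-cased word sets) staying below 0.3; a small worked example of the same computation:

import re

def jaccard(a, b):
    t1 = set(re.findall(r'\w+', a.lower()))
    t2 = set(re.findall(r'\w+', b.lower()))
    return len(t1 & t2) / len(t1 | t2)

print(jaccard("John lives in Paris", "John lives in Berlin"))   # 3/5 = 0.6 -> too similar, fails
print(jaccard("John lives in Paris", "Alice works near Rome"))  # 0/8 = 0.0 -> passes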
test_scripts/FunASR_01/test_script.py
ADDED
@@ -0,0 +1,122 @@
import os
import json
import argparse
import datetime
import numpy as np


def check_file_exists(file_path):
    """Check if file exists and is not empty"""
    if not os.path.exists(file_path):
        return False, f"File does not exist: {file_path}"
    if os.path.getsize(file_path) == 0:
        return False, f"File is empty: {file_path}"
    return True, ""


def cer(ref, hyp):
    """Character Error Rate using Levenshtein distance"""
    r = ref
    h = hyp
    # Use a full integer dtype; uint8 would overflow for references longer than 255 characters
    d = np.zeros((len(r)+1)*(len(h)+1), dtype=int).reshape((len(r)+1, len(h)+1))

    for i in range(len(r)+1):
        d[i][0] = i
    for j in range(len(h)+1):
        d[0][j] = j

    for i in range(1, len(r)+1):
        for j in range(1, len(h)+1):
            cost = 0 if r[i-1] == h[j-1] else 1
            d[i][j] = min(d[i-1][j] + 1,       # deletion
                          d[i][j-1] + 1,       # insertion
                          d[i-1][j-1] + cost)  # substitution

    return d[len(r)][len(h)] / max(len(r), 1)


def load_text(file_path):
    """Load full text content from file"""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read().replace('\n', '').strip(), ""
    except Exception as e:
        return None, str(e)


def evaluate(system_output_file, ground_truth_file, cer_threshold=0.05):
    """Evaluate CER between system output and ground truth"""
    # Check files
    process_ok, process_msg = check_file_exists(system_output_file)
    if not process_ok:
        return False, False, process_msg

    process_ok, process_msg = check_file_exists(ground_truth_file)
    if not process_ok:
        return False, False, process_msg

    # Load content
    sys_text, msg1 = load_text(system_output_file)
    gt_text, msg2 = load_text(ground_truth_file)

    if sys_text is None:
        return True, False, f"Failed to load system output: {msg1}"
    if gt_text is None:
        return True, False, f"Failed to load ground truth: {msg2}"

    score = cer(gt_text, sys_text)
    comment = f"CER = {score:.4f}"
    if score > cer_threshold:
        comment += f" ❌ Exceeds threshold {cer_threshold}"
        return True, False, comment
    else:
        comment += f" ✅ Within threshold {cer_threshold}"
        return True, True, comment


def save_results_to_jsonl(process_ok, result_ok, comments, jsonl_file):
    """Save test results to JSONL file"""
    current_time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

    result_data = {
        "Process": bool(process_ok),
        "Result": bool(result_ok),
        "TimePoint": current_time,
        "comments": comments
    }

    os.makedirs(os.path.dirname(jsonl_file), exist_ok=True)

    with open(jsonl_file, 'a', encoding='utf-8') as f:
        json.dump(result_data, f, ensure_ascii=False, default=str)
        f.write('\n')


def main():
    parser = argparse.ArgumentParser(description='Evaluate speech recognition results (no speaker separation)')
    parser.add_argument('--output', required=True, help='System output file path')
    parser.add_argument('--groundtruth', required=True, help='Ground truth file path')
    parser.add_argument('--cer_threshold', type=float, default=0.10, help='CER threshold')
    parser.add_argument('--result', required=True, help='Result JSONL file path')

    args = parser.parse_args()

    process_ok, result_ok, comments = evaluate(
        args.output,
        args.groundtruth,
        args.cer_threshold
    )

    save_results_to_jsonl(process_ok, result_ok, comments, args.result)

    if not process_ok:
        print(f"Processing failed: {comments}")
    elif not result_ok:
        print(f"Results do not meet requirements: {comments}")
    else:
        print("✅ Test passed")
        print(comments)


if __name__ == "__main__":
    main()
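Note: as a quick sanity check of the cer() helper above, a single edit against a five-character reference yields 0.2, which would fail the default 0.10 threshold:

# assumes cer() from the script above is in scope
print(cer("hello", "helo"))   # edit distance 1 / 5 reference chars = 0.2
print(cer("hello", "hello"))  # 0.0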
test_scripts/FunASR_02/test_script.py
ADDED
@@ -0,0 +1,134 @@
import os
import sys
import json
import argparse
from difflib import SequenceMatcher
import datetime
import re

def check_file_exists(file_path):
    """Check if file exists and is not empty"""
    if not os.path.exists(file_path):
        return False, f"File does not exist: {file_path}"
    if os.path.getsize(file_path) == 0:
        return False, f"File is empty: {file_path}"
    return True, ""


def cer(ref, hyp):
    """Character Error Rate = Edit Distance / Length of Reference"""
    import numpy as np
    ref = list(ref)
    hyp = list(hyp)
    d = np.zeros((len(ref)+1, len(hyp)+1), dtype=int)
    for i in range(len(ref)+1):
        d[i][0] = i
    for j in range(len(hyp)+1):
        d[0][j] = j
    for i in range(1, len(ref)+1):
        for j in range(1, len(hyp)+1):
            cost = 0 if ref[i-1] == hyp[j-1] else 1
            d[i][j] = min(
                d[i-1][j] + 1,       # deletion
                d[i][j-1] + 1,       # insertion
                d[i-1][j-1] + cost   # substitution
            )
    return d[len(ref)][len(hyp)] / max(len(ref), 1)


def is_likely_english(text):
    english_letters = re.findall(r'[a-zA-Z]', text)
    if not english_letters:
        return False
    ratio = len(english_letters) / max(len(text), 1)
    return ratio > 0.5 and len(english_letters) >= 10  # at least 10 letters, >50% are English


def load_transcripts(file_path):
    """Load transcript text from file"""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read().replace("\n", ""), ""
    except Exception as e:
        return None, str(e)


def evaluate(system_output_file, ground_truth_file, cer_threshold=0.05):
    """Main evaluation function: Calculate CER between system output and ground truth"""
    # Check files
    process_ok, process_msg = check_file_exists(system_output_file)
    if not process_ok:
        return False, False, process_msg

    process_ok, process_msg = check_file_exists(ground_truth_file)
    if not process_ok:
        return False, False, process_msg

    # Load transcripts
    system_trans, msg = load_transcripts(system_output_file)
    if system_trans is None:
        return True, False, f"Failed to load system output: {msg}"

    ground_truth, msg = load_transcripts(ground_truth_file)
    if ground_truth is None:
        return True, False, f"Failed to load ground truth: {msg}"

    if not is_likely_english(system_trans):
        return True, False, "Output text does not appear to be valid English transcription"

    # Calculate CER
    score = cer(ground_truth, system_trans)
    comments = [f"CER = {score:.4f}"]

    result_ok = score <= cer_threshold
    if not result_ok:
        comments.append(f"CER ({score:.4f}) exceeds threshold {cer_threshold}")

    return True, result_ok, "\n".join(comments)


def save_results_to_jsonl(process_ok, result_ok, comments, jsonl_file):
    """Save test results to JSONL file"""
    current_time = datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")

    result_data = {
        "Process": bool(process_ok),
        "Result": bool(result_ok),
        "TimePoint": current_time,
        "comments": comments
    }

    os.makedirs(os.path.dirname(jsonl_file), exist_ok=True)

    with open(jsonl_file, 'a', encoding='utf-8') as f:
        json.dump(result_data, f, ensure_ascii=False, default=str)
        f.write('\n')


def main():
    parser = argparse.ArgumentParser(description='Evaluate speech recognition results')
    parser.add_argument('--output', required=True, help='System output file path')
    parser.add_argument('--groundtruth', required=True, help='Ground truth file path')
    parser.add_argument('--cer_threshold', type=float, default=0.10, help='CER threshold')
    parser.add_argument('--result', required=True, help='Result JSONL file path')

    args = parser.parse_args()

    process_ok, result_ok, comments = evaluate(
        args.output,
        args.groundtruth,
        args.cer_threshold
    )

    save_results_to_jsonl(process_ok, result_ok, comments, args.result)

    if not process_ok:
        print(f"Processing failed: {comments}")
    if not result_ok:
        print(f"Results do not meet requirements: {comments}")
    print("Test completed")  # Changed to neutral prompt


if __name__ == "__main__":
    main()
test_scripts/FunASR_03/test_script.py
ADDED
@@ -0,0 +1,135 @@
import argparse
import jieba
import re
import json
from jiwer import compute_measures
from datetime import datetime
import os
from collections import Counter


def parse_args():
    parser = argparse.ArgumentParser(description="Evaluate text similarity between FunASR output and ground truth")
    parser.add_argument('--output', default='output.txt', help='FunASR output file path')
    parser.add_argument('--groundtruth', default='gt.txt', help='Ground truth file path')
    parser.add_argument('--result', default='eval_results.jsonl', help='JSONL file path to save evaluation results')
    parser.add_argument('--wer_threshold', type=float, default=0.3,
                        help='WER threshold, task considered successful if below this value')
    parser.add_argument('--punctuation_threshold', type=float, default=0.7,
                        help='Punctuation matching threshold, task considered failed if below this value')
    return parser.parse_args()


def preprocess_text(text):
    """Preprocess text: remove punctuation and spaces, tokenize with jieba"""
    text = re.sub(r'[^\w\s]', '', text)
    text = text.replace(" ", "")
    return " ".join(jieba.cut(text))


def extract_punctuation(text):
    """Extract punctuation characters from text"""
    return re.findall(r'[^\w\s]', text)


def punctuation_match_score(output_puncs, gt_puncs):
    """Compute matching score based on overlapping punctuation"""
    output_counter = Counter(output_puncs)
    gt_counter = Counter(gt_puncs)
    common = sum((output_counter & gt_counter).values())
    total = max(len(gt_puncs), 1)
    return common / total


def load_text(file_path, is_output=False):
    """Load text file, extract 'text' field from FunASR output when is_output=True"""
    try:
        with open(file_path, 'r', encoding='utf-8') as f:
            content = f.read().strip()
        if not content:
            raise ValueError(f"File {file_path} is empty")

        if is_output:
            match = re.search(r"'text':\s*'([^']*)'", content)
            if match:
                extracted_text = match.group(1)
            else:
                extracted_text = content
            return preprocess_text(extracted_text), extracted_text
        else:
            return preprocess_text(content), content
    except FileNotFoundError:
        return "", False
    except Exception as e:
        return "", False


def evaluate(output_file, gt_file, result_file, wer_threshold, punctuation_threshold):
    comments = []
    process_success = False
    result_success = False

    output_text, output_raw = load_text(output_file, is_output=True)
    gt_text, gt_raw = load_text(gt_file, is_output=False)

    if output_text and gt_text:
        process_success = True
        comments.append("Input files exist, are non-empty and correctly formatted")
    else:
        comments.append("Input file issues:")
        if not output_text:
            comments.append(f"Output file {output_file} does not exist or has incorrect format")
        if not gt_text:
            comments.append(f"Ground truth file {gt_file} does not exist or has incorrect format")

    wer_score = None
    punctuation_score = None

    if process_success:
        try:
            measures = compute_measures(gt_text, output_text)
            wer_score = measures['wer']

            output_puncs = extract_punctuation(output_raw)
            gt_puncs = extract_punctuation(gt_raw)
            punctuation_score = punctuation_match_score(output_puncs, gt_puncs)

            comments.append(f"Word Error Rate (WER): {wer_score:.4f}")
            comments.append(f"Punctuation matching score: {punctuation_score:.4f}")

            if wer_score <= wer_threshold and punctuation_score >= punctuation_threshold:
                result_success = True
                comments.append("Task completed successfully (WER and punctuation both meet thresholds)")
            else:
                comments.append(
                    f"Task failed (WER {'above' if wer_score > wer_threshold else 'below'} threshold {wer_threshold}, "
                    f"punctuation {'below' if punctuation_score < punctuation_threshold else 'meets'} threshold {punctuation_threshold})"
                )
        except Exception as e:
            comments.append(f"Exception occurred during evaluation: {str(e)}")
    else:
        comments.append("Evaluation not performed due to invalid input files")

    for c in comments:
        print(c)

    result_entry = {
        "Process": process_success,
        "Result": result_success,
        "TimePoint": datetime.now().strftime("%Y-%m-%dT%H:%M:%S"),
        "comments": "; ".join(comments)
    }

    try:
        os.makedirs(os.path.dirname(result_file), exist_ok=True)
        with open(result_file, 'a', encoding='utf-8') as f:
            json.dump(result_entry, f, ensure_ascii=False, default=str)
            f.write('\n')
    except Exception as e:
        print(f"Error saving result to {result_file}: {str(e)}")


if __name__ == "__main__":
    args = parse_args()
    evaluate(args.output, args.groundtruth, args.result, args.wer_threshold, args.punctuation_threshold)
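Note: the punctuation score above counts, with multiplicity, how many ground-truth punctuation marks also appear in the output (a Counter intersection) divided by the ground-truth count; for example:

from collections import Counter

output_puncs = ['，', '。', '。']
gt_puncs = ['，', '。', '。', '！']
common = sum((Counter(output_puncs) & Counter(gt_puncs)).values())  # 3
print(common / max(len(gt_puncs), 1))  # 0.75, just above the default 0.7 threshold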
test_scripts/InvisibleWatermark_01/imwatermark/__init__.py
ADDED
@@ -0,0 +1 @@
from .watermark import WatermarkEncoder, WatermarkDecoder
test_scripts/InvisibleWatermark_01/imwatermark/dwtDctSvd.py
ADDED
@@ -0,0 +1,108 @@
import numpy as np
import copy
import cv2
import pywt
import math
import pprint

pp = pprint.PrettyPrinter(indent=2)


class EmbedDwtDctSvd(object):
    def __init__(self, watermarks=[], wmLen=8, scales=[0,36,0], block=4):
        self._watermarks = watermarks
        self._wmLen = wmLen
        self._scales = scales
        self._block = block

    def encode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')
            self.encode_frame(ca1, self._scales[channel])

            yuv[:row//4*4,:col//4*4,channel] = pywt.idwt2((ca1, (v1,h1,d1)), 'haar')

        bgr_encoded = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
        return bgr_encoded

    def decode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        scores = [[] for i in range(self._wmLen)]
        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')

            scores = self.decode_frame(ca1, self._scales[channel], scores)

        avgScores = list(map(lambda l: np.array(l).mean(), scores))

        bits = (np.array(avgScores) * 255 > 127)
        return bits

    def decode_frame(self, frame, scale, scores):
        (row, col) = frame.shape
        num = 0

        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]

                score = self.infer_dct_svd(block, scale)
                wmBit = num % self._wmLen
                scores[wmBit].append(score)
                num = num + 1

        return scores

    def diffuse_dct_svd(self, block, wmBit, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        s[0] = (s[0] // scale + 0.25 + 0.5 * wmBit) * scale
        return cv2.idct(np.dot(u, np.dot(np.diag(s), v)))

    def infer_dct_svd(self, block, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        score = 0
        score = int ((s[0] % scale) > scale * 0.5)
        return score
        if score >= 0.5:
            return 1.0
        else:
            return 0.0

    def encode_frame(self, frame, scale):
        '''
        frame is a matrix (M, N)

        we get K (watermark bits size) blocks (self._block x self._block)

        For i-th block, we encode watermark[i] bit into it
        '''
        (row, col) = frame.shape
        num = 0
        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]
                wmBit = self._watermarks[(num % self._wmLen)]

                diffusedBlock = self.diffuse_dct_svd(block, wmBit, scale)
                frame[i*self._block : i*self._block + self._block,
                      j*self._block : j*self._block + self._block] = diffusedBlock

                num = num+1
test_scripts/InvisibleWatermark_01/imwatermark/maxDct.py
ADDED
@@ -0,0 +1,134 @@
import numpy as np
import copy
import cv2
import pywt
import math
import pprint

pp = pprint.PrettyPrinter(indent=2)


class EmbedMaxDct(object):
    def __init__(self, watermarks=[], wmLen=8, scales=[0,36,36], block=4):
        self._watermarks = watermarks
        self._wmLen = wmLen
        self._scales = scales
        self._block = block

    def encode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')
            self.encode_frame(ca1, self._scales[channel])

            yuv[:row//4*4,:col//4*4,channel] = pywt.idwt2((ca1, (v1,h1,d1)), 'haar')

        bgr_encoded = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
        return bgr_encoded

    def decode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        scores = [[] for i in range(self._wmLen)]
        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')

            scores = self.decode_frame(ca1, self._scales[channel], scores)

        avgScores = list(map(lambda l: np.array(l).mean(), scores))

        bits = (np.array(avgScores) * 255 > 127)
        return bits

    def decode_frame(self, frame, scale, scores):
        (row, col) = frame.shape
        num = 0

        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]

                score = self.infer_dct_matrix(block, scale)
                #score = self.infer_dct_svd(block, scale)
                wmBit = num % self._wmLen
                scores[wmBit].append(score)
                num = num + 1

        return scores

    def diffuse_dct_svd(self, block, wmBit, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        s[0] = (s[0] // scale + 0.25 + 0.5 * wmBit) * scale
        return cv2.idct(np.dot(u, np.dot(np.diag(s), v)))

    def infer_dct_svd(self, block, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        score = 0
        score = int ((s[0] % scale) > scale * 0.5)
        return score
        if score >= 0.5:
            return 1.0
        else:
            return 0.0

    def diffuse_dct_matrix(self, block, wmBit, scale):
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block
        val = block[i][j]
        if val >= 0.0:
            block[i][j] = (val//scale + 0.25 + 0.5 * wmBit) * scale
        else:
            val = abs(val)
            block[i][j] = -1.0 * (val//scale + 0.25 + 0.5 * wmBit) * scale
        return block

    def infer_dct_matrix(self, block, scale):
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block

        val = block[i][j]
        if val < 0:
            val = abs(val)

        if (val % scale) > 0.5 * scale:
            return 1
        else:
            return 0

    def encode_frame(self, frame, scale):
        '''
        frame is a matrix (M, N)

        we get K (watermark bits size) blocks (self._block x self._block)

        For i-th block, we encode watermark[i] bit into it
        '''
        (row, col) = frame.shape
        num = 0
        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]
                wmBit = self._watermarks[(num % self._wmLen)]

                diffusedBlock = self.diffuse_dct_matrix(block, wmBit, scale)
                #diffusedBlock = self.diffuse_dct_svd(block, wmBit, scale)
                frame[i*self._block : i*self._block + self._block,
                      j*self._block : j*self._block + self._block] = diffusedBlock

                num = num+1
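Note: both embedders above rely on the same quantization rule: a coefficient v is snapped to (v // scale + 0.25 + 0.5 * bit) * scale, and the bit is read back by testing v % scale against scale / 2. A numeric illustration with the default scale of 36:

scale = 36

def embed(v, bit):
    return (v // scale + 0.25 + 0.5 * bit) * scale

def read(v):
    return int((v % scale) > 0.5 * scale)

print(embed(100, 1), read(embed(100, 1)))  # 99.0 1
print(embed(100, 0), read(embed(100, 0)))  # 81.0 0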
test_scripts/InvisibleWatermark_01/imwatermark/rivaGan.py
ADDED
@@ -0,0 +1,69 @@
import numpy as np
import torch
import cv2
import os
import time


class RivaWatermark(object):
    encoder = None
    decoder = None

    def __init__(self, watermarks=[], wmLen=32, threshold=0.52):
        self._watermarks = watermarks
        self._threshold = threshold
        if wmLen not in [32]:
            raise RuntimeError('rivaGan only supports 32 bits watermarks now.')
        self._data = torch.from_numpy(np.array([self._watermarks], dtype=np.float32))

    @classmethod
    def loadModel(cls):
        try:
            import onnxruntime
        except ImportError:
            raise ImportError(
                "The `RivaWatermark` class requires onnxruntime to be installed. "
                "You can install it with pip: `pip install onnxruntime`."
            )

        if RivaWatermark.encoder and RivaWatermark.decoder:
            return
        modelDir = os.path.dirname(os.path.abspath(__file__))
        RivaWatermark.encoder = onnxruntime.InferenceSession(
            os.path.join(modelDir, 'rivagan_encoder.onnx'))
        RivaWatermark.decoder = onnxruntime.InferenceSession(
            os.path.join(modelDir, 'rivagan_decoder.onnx'))

    def encode(self, frame):
        if not RivaWatermark.encoder:
            raise RuntimeError('call loadModel method first')

        frame = torch.from_numpy(np.array([frame], dtype=np.float32)) / 127.5 - 1.0
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0)

        inputs = {
            'frame': frame.detach().cpu().numpy(),
            'data': self._data.detach().cpu().numpy()
        }

        outputs = RivaWatermark.encoder.run(None, inputs)
        wm_frame = outputs[0]
        wm_frame = torch.clamp(torch.from_numpy(wm_frame), min=-1.0, max=1.0)
        wm_frame = (
            (wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5
        ).detach().cpu().numpy().astype('uint8')

        return wm_frame

    def decode(self, frame):
        if not RivaWatermark.decoder:
            raise RuntimeError('you need load model first')

        frame = torch.from_numpy(np.array([frame], dtype=np.float32)) / 127.5 - 1.0
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0)
        inputs = {
            'frame': frame.detach().cpu().numpy(),
        }
        outputs = RivaWatermark.decoder.run(None, inputs)
        data = outputs[0][0]
        return np.array(data > self._threshold, dtype=np.uint8)
test_scripts/InvisibleWatermark_01/imwatermark/rivagan_decoder.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b006690cf352fa50a91a0b6d6241dbcded0a9957969e2c0665cf9a7011b3880
size 1088816
test_scripts/InvisibleWatermark_01/imwatermark/rivagan_encoder.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66bc7814fadac7d686105c8ab26c6cf2cc8e40deb83e67bb6ef5b9de9b8ece0d
size 657164
test_scripts/InvisibleWatermark_01/imwatermark/watermark.py
ADDED
@@ -0,0 +1,171 @@
import struct
import uuid
import copy
import base64
import cv2
import numpy as np
from .maxDct import EmbedMaxDct
from .dwtDctSvd import EmbedDwtDctSvd
from .rivaGan import RivaWatermark
import pprint

pp = pprint.PrettyPrinter(indent=2)

class WatermarkEncoder(object):
    def __init__(self, content=b''):
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)
        self._wmType = 'bytes'

    def set_by_ipv4(self, addr):
        bits = []
        ips = addr.split('.')
        for ip in ips:
            bits += list(np.unpackbits(np.array([ip % 255], dtype=np.uint8)))
        self._watermarks = bits
        self._wmLen = len(self._watermarks)
        self._wmType = 'ipv4'
        assert self._wmLen == 32

    def set_by_uuid(self, uid):
        u = uuid.UUID(uid)
        self._wmType = 'uuid'
        seq = np.array([n for n in u.bytes], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_bytes(self, content):
        self._wmType = 'bytes'
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_b16(self, b16):
        content = base64.b16decode(b16)
        self.set_by_bytes(content)
        self._wmType = 'b16'

    def set_by_bits(self, bits=[]):
        self._watermarks = [int(bit) % 2 for bit in bits]
        self._wmLen = len(self._watermarks)
        self._wmType = 'bits'

    def set_watermark(self, wmType='bytes', content=''):
        if wmType == 'ipv4':
            self.set_by_ipv4(content)
        elif wmType == 'uuid':
            self.set_by_uuid(content)
        elif wmType == 'bits':
            self.set_by_bits(content)
        elif wmType == 'bytes':
            self.set_by_bytes(content)
        elif wmType == 'b16':
            self.set_by_b16(content)
        else:
            raise NameError('%s is not supported' % wmType)

    def get_length(self):
        return self._wmLen

    @classmethod
    def loadModel(cls):
        RivaWatermark.loadModel()

    def encode(self, cv2Image, method='dwtDct', **configs):
        (r, c, channels) = cv2Image.shape
        if r*c < 256*256:
            raise RuntimeError('image too small, should be larger than 256x256')

        if method == 'dwtDct':
            embed = EmbedMaxDct(self._watermarks, wmLen=self._wmLen, **configs)
            return embed.encode(cv2Image)
        elif method == 'dwtDctSvd':
            embed = EmbedDwtDctSvd(self._watermarks, wmLen=self._wmLen, **configs)
            return embed.encode(cv2Image)
        elif method == 'rivaGan':
            embed = RivaWatermark(self._watermarks, self._wmLen)
            return embed.encode(cv2Image)
        else:
            raise NameError('%s is not supported' % method)

class WatermarkDecoder(object):
    def __init__(self, wm_type='bytes', length=0):
        self._wmType = wm_type
        if wm_type == 'ipv4':
            self._wmLen = 32
        elif wm_type == 'uuid':
            self._wmLen = 128
        elif wm_type == 'bytes':
            self._wmLen = length
        elif wm_type == 'bits':
            self._wmLen = length
        elif wm_type == 'b16':
            self._wmLen = length
        else:
            raise NameError('%s is unsupported' % wm_type)

    def reconstruct_ipv4(self, bits):
        ips = [str(ip) for ip in list(np.packbits(bits))]
        return '.'.join(ips)

    def reconstruct_uuid(self, bits):
        nums = np.packbits(bits)
        bstr = b''
        for i in range(16):
            bstr += struct.pack('>B', nums[i])

        return str(uuid.UUID(bytes=bstr))

    def reconstruct_bits(self, bits):
        #return ''.join([str(b) for b in bits])
        return bits

    def reconstruct_b16(self, bits):
        bstr = self.reconstruct_bytes(bits)
        return base64.b16encode(bstr)

    def reconstruct_bytes(self, bits):
        nums = np.packbits(bits)
        bstr = b''
        for i in range(self._wmLen//8):
            bstr += struct.pack('>B', nums[i])
        return bstr

    def reconstruct(self, bits):
        if len(bits) != self._wmLen:
            raise RuntimeError('bits are not matched with watermark length')

        if self._wmType == 'ipv4':
            return self.reconstruct_ipv4(bits)
        elif self._wmType == 'uuid':
            return self.reconstruct_uuid(bits)
        elif self._wmType == 'bits':
            return self.reconstruct_bits(bits)
        elif self._wmType == 'b16':
            return self.reconstruct_b16(bits)
        else:
            return self.reconstruct_bytes(bits)

    def decode(self, cv2Image, method='dwtDct', **configs):
        (r, c, channels) = cv2Image.shape
        if r*c < 256*256:
            raise RuntimeError('image too small, should be larger than 256x256')

        bits = []
        if method == 'dwtDct':
            embed = EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
            bits = embed.decode(cv2Image)
        elif method == 'dwtDctSvd':
            embed = EmbedDwtDctSvd(watermarks=[], wmLen=self._wmLen, **configs)
            bits = embed.decode(cv2Image)
        elif method == 'rivaGan':
            embed = RivaWatermark(watermarks=[], wmLen=self._wmLen, **configs)
            bits = embed.decode(cv2Image)
        else:
            raise NameError('%s is not supported' % method)
        return self.reconstruct(bits)

    @classmethod
    def loadModel(cls):
        RivaWatermark.loadModel()
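Note: the encoder/decoder pair above is used roughly as in the upstream invisible-watermark README; the image paths and payload below are placeholders:

import cv2
from imwatermark import WatermarkEncoder, WatermarkDecoder

bgr = cv2.imread('input.png')  # placeholder path; r*c must be at least 256*256 pixels
encoder = WatermarkEncoder()
encoder.set_watermark('bytes', 'test'.encode('utf-8'))
cv2.imwrite('output.png', encoder.encode(bgr, 'dwtDct'))

decoder = WatermarkDecoder('bytes', 32)  # 32 bits = the 4-byte payload length
wm = decoder.decode(cv2.imread('output.png'), 'dwtDct')
print(wm.decode('utf-8'))  # 'test', barring decoding noise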
test_scripts/InvisibleWatermark_01/test_script.py
ADDED
@@ -0,0 +1,100 @@
| 1 |
+
import argparse
|
| 2 |
+
import cv2
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
from datetime import datetime
|
| 6 |
+
from imwatermark import WatermarkDecoder
|
| 7 |
+
from skimage.metrics import peak_signal_noise_ratio as compare_psnr
|
| 8 |
+
|
| 9 |
+
def evaluate_watermark(original_path, watermark_text, watermarked_path):
|
| 10 |
+
process_status = True
|
| 11 |
+
final_result_status = False
|
| 12 |
+
comments = []
|
| 13 |
+
|
| 14 |
+
# Timestamp
|
| 15 |
+
time_point = datetime.now().isoformat()
|
| 16 |
+
|
| 17 |
+
# Check input files
|
| 18 |
+
if not os.path.exists(original_path) or os.path.getsize(original_path) == 0:
|
| 19 |
+
comments.append(f"Error: Original image file '{original_path}' does not exist or is empty.")
|
| 20 |
+
process_status = False
|
| 21 |
+
if not os.path.exists(watermarked_path) or os.path.getsize(watermarked_path) == 0:
|
| 22 |
+
comments.append(f"Error: Watermarked image file '{watermarked_path}' does not exist or is empty.")
|
| 23 |
+
process_status = False
|
| 24 |
+
|
| 25 |
+
if process_status:
|
| 26 |
+
bgr_original = cv2.imread(original_path)
|
| 27 |
+
bgr_encoded = cv2.imread(watermarked_path)
|
| 28 |
+
if bgr_original is None or bgr_encoded is None:
|
| 29 |
+
comments.append("Error: Failed to read images, please check if files are corrupted or in correct format.")
|
| 30 |
+
process_status = False
|
| 31 |
+
|
| 32 |
+
if process_status:
|
| 33 |
+
try:
|
| 34 |
+
# decoder = WatermarkDecoder('bytes', len(watermark_text)*8)
|
| 35 |
+
# decoded_bytes = decoder.decode(bgr_encoded, 'dwtDct')
|
| 36 |
+
# extracted_text= decoded_bytes.decode('utf-8', errors='ignore')
|
| 37 |
+
# is_match = (extracted_text == watermark_text)
|
| 38 |
+
|
| 39 |
+
max_bits = 256
|
| 40 |
+
decoder = WatermarkDecoder('bytes', max_bits)
|
| 41 |
+
decoded_bytes = decoder.decode(bgr_encoded, 'dwtDct')
|
| 42 |
+
extracted_text = decoded_bytes.decode('utf-8', errors='ignore')
|
| 43 |
+
|
| 44 |
+
is_match = (watermark_text in extracted_text)
|
| 45 |
+
|
| 46 |
+
comments.append(f"{'✅' if is_match else '❌'} Extraction result: '{extracted_text}' | GT: '{watermark_text}'")
|
| 47 |
+
psnr_value = compare_psnr(bgr_original, bgr_encoded)
|
| 48 |
+
comments.append(f"📐 PSNR: {psnr_value:.2f} dB")
|
| 49 |
+
|
| 50 |
+
# Metrics
|
| 51 |
+
match_rate = '100%' if is_match else '0%'
|
| 52 |
+
psnr_satisfied = psnr_value >= 30.0
|
| 53 |
+
comments.append(f"🎯 Watermark detection_match: {match_rate}")
|
| 54 |
+
comments.append(f"🎯 PSNR ≥ 30.0: {'✅ Satisfied' if psnr_satisfied else '❌ Not satisfied'}")
|
| 55 |
+
|
| 56 |
+
final_result_status = is_match and psnr_satisfied
|
| 57 |
+
comments.append(f"Final evaluation result: Watermark match={is_match}, PSNR satisfied={psnr_satisfied}")
|
| 58 |
+
|
| 59 |
+
except Exception as e:
|
| 60 |
+
comments.append(f"Exception occurred during watermark processing or evaluation: {e}")
|
| 61 |
+
final_result_status = False
|
| 62 |
+
|
| 63 |
+
output_data = {
|
| 64 |
+
"Process": process_status,
|
| 65 |
+
"Result": final_result_status,
|
| 66 |
+
"TimePoint": time_point,
|
| 67 |
+
"Comments": "\n".join(comments)
|
| 68 |
+
}
|
| 69 |
+
print(output_data["Comments"])
|
| 70 |
+
return output_data
|
| 71 |
+
|
| 72 |
+
def write_to_jsonl(file_path, data):
|
| 73 |
+
"""
|
| 74 |
+
Append single result to JSONL file:
|
| 75 |
+
Each run appends one JSON line.
|
| 76 |
+
"""
|
| 77 |
+
try:
|
| 78 |
+
os.makedirs(os.path.dirname(file_path), exist_ok=True)
|
| 79 |
+
with open(file_path, 'a', encoding='utf-8') as f:
|
| 80 |
+
# Add default=str to handle non-serializable types with str()
|
| 81 |
+
f.write(json.dumps(data, ensure_ascii=False, default=str) + '\n')
|
| 82 |
+
print(f"✅ Result appended to JSONL file: {file_path}")
|
| 83 |
+
except Exception as e:
|
| 84 |
+
print(f"❌ Error occurred while writing to JSONL file: {e}")
|
| 85 |
+
|
| 86 |
+
if __name__ == "__main__":
|
| 87 |
+
parser = argparse.ArgumentParser(
|
| 88 |
+
description="Extract and verify blind watermark, calculate image quality, and store results as JSONL")
|
| 89 |
+
parser.add_argument("--groundtruth", required=True, help="Path to original image")
|
| 90 |
+
parser.add_argument("--output", required=True, help="Path to watermarked image")
|
| 91 |
+
parser.add_argument("--watermark", required=True, help="Expected watermark content to extract")
|
| 92 |
+
parser.add_argument("--result", help="File path to store JSONL results")
|
| 93 |
+
|
| 94 |
+
args = parser.parse_args()
|
| 95 |
+
|
| 96 |
+
evaluation_result = evaluate_watermark(
|
| 97 |
+
args.groundtruth, args.watermark, args.output)
|
| 98 |
+
|
| 99 |
+
if args.result:
|
| 100 |
+
write_to_jsonl(args.result, evaluation_result)
|
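For context, the watermarked image this script checks would typically be produced with the bundled imwatermark encoder before the evaluation runs. A minimal, hedged encoding sketch follows; the file names and the 'SampleWM' payload are illustrative, not part of the test harness:

# Hedged sketch: embed a short byte watermark with the bundled encoder,
# then run the evaluation script above against the result.
import cv2
from imwatermark import WatermarkEncoder

bgr = cv2.imread('original.png')              # assumed input image (at least 256x256 pixels)
encoder = WatermarkEncoder()
encoder.set_watermark('bytes', 'SampleWM'.encode('utf-8'))
cv2.imwrite('watermarked.png', encoder.encode(bgr, 'dwtDct'))

# Example invocation of the evaluation script:
#   python test_script.py --groundtruth original.png --output watermarked.png \
#       --watermark SampleWM --result results.jsonl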
test_scripts/InvisibleWatermark_02/imwatermark/__init__.py
ADDED
@@ -0,0 +1 @@
from .watermark import WatermarkEncoder, WatermarkDecoder

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (290 Bytes).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (252 Bytes).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/__init__.cpython-38.pyc
ADDED
Binary file (256 Bytes).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/dwtDctSvd.cpython-312.pyc
ADDED
Binary file (6.62 kB).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/maxDct.cpython-312.pyc
ADDED
Binary file (7.87 kB).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/maxDct.cpython-37.pyc
ADDED
Binary file (4.4 kB).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/maxDct.cpython-38.pyc
ADDED
Binary file (4.4 kB).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/rivaGan.cpython-312.pyc
ADDED
Binary file (4.73 kB).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/watermark.cpython-312.pyc
ADDED
Binary file (10.1 kB).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/watermark.cpython-37.pyc
ADDED
Binary file (6.82 kB).

test_scripts/InvisibleWatermark_02/imwatermark/__pycache__/watermark.cpython-38.pyc
ADDED
Binary file (6.79 kB).

test_scripts/InvisibleWatermark_02/imwatermark/dwtDctSvd.py
ADDED
@@ -0,0 +1,108 @@
import numpy as np
import copy
import cv2
import pywt
import math
import pprint

pp = pprint.PrettyPrinter(indent=2)


class EmbedDwtDctSvd(object):
    def __init__(self, watermarks=[], wmLen=8, scales=[0,36,0], block=4):
        self._watermarks = watermarks
        self._wmLen = wmLen
        self._scales = scales
        self._block = block

    def encode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')
            self.encode_frame(ca1, self._scales[channel])

            yuv[:row//4*4,:col//4*4,channel] = pywt.idwt2((ca1, (v1,h1,d1)), 'haar')

        bgr_encoded = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
        return bgr_encoded

    def decode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        scores = [[] for i in range(self._wmLen)]
        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')

            scores = self.decode_frame(ca1, self._scales[channel], scores)

        avgScores = list(map(lambda l: np.array(l).mean(), scores))

        bits = (np.array(avgScores) * 255 > 127)
        return bits

    def decode_frame(self, frame, scale, scores):
        (row, col) = frame.shape
        num = 0

        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]

                score = self.infer_dct_svd(block, scale)
                wmBit = num % self._wmLen
                scores[wmBit].append(score)
                num = num + 1

        return scores

    def diffuse_dct_svd(self, block, wmBit, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        s[0] = (s[0] // scale + 0.25 + 0.5 * wmBit) * scale
        return cv2.idct(np.dot(u, np.dot(np.diag(s), v)))

    def infer_dct_svd(self, block, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        score = 0
        score = int((s[0] % scale) > scale * 0.5)
        return score
        if score >= 0.5:
            return 1.0
        else:
            return 0.0

    def encode_frame(self, frame, scale):
        '''
        frame is a matrix (M, N)

        we get K (watermark bits size) blocks (self._block x self._block)

        For i-th block, we encode watermark[i] bit into it
        '''
        (row, col) = frame.shape
        num = 0
        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]
                wmBit = self._watermarks[(num % self._wmLen)]

                diffusedBlock = self.diffuse_dct_svd(block, wmBit, scale)
                frame[i*self._block : i*self._block + self._block,
                      j*self._block : j*self._block + self._block] = diffusedBlock

                num = num+1

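The embedding in diffuse_dct_svd is a quantization-index-modulation step on the leading singular value of the block's DCT: the value is snapped to the lower or upper half of a quantization interval of width scale, and infer_dct_svd later reads the bit back from the remainder. A small, self-contained sketch of that scalar round trip (the numeric value is illustrative):

# Hedged sketch of the scalar quantization used above (scale=36 mirrors the default).
def embed_bit(value, bit, scale=36):
    # place value at 0.25*scale (bit 0) or 0.75*scale (bit 1) inside its interval
    return (value // scale + 0.25 + 0.5 * bit) * scale

def read_bit(value, scale=36):
    # the remainder tells us which half of the interval the value sits in
    return int((value % scale) > scale * 0.5)

v = 1234.5
for b in (0, 1):
    assert read_bit(embed_bit(v, b)) == b   # round trip recovers the bit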
test_scripts/InvisibleWatermark_02/imwatermark/maxDct.py
ADDED
@@ -0,0 +1,134 @@
import numpy as np
import copy
import cv2
import pywt
import math
import pprint

pp = pprint.PrettyPrinter(indent=2)


class EmbedMaxDct(object):
    def __init__(self, watermarks=[], wmLen=8, scales=[0,36,36], block=4):
        self._watermarks = watermarks
        self._wmLen = wmLen
        self._scales = scales
        self._block = block

    def encode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')
            self.encode_frame(ca1, self._scales[channel])

            yuv[:row//4*4,:col//4*4,channel] = pywt.idwt2((ca1, (v1,h1,d1)), 'haar')

        bgr_encoded = cv2.cvtColor(yuv, cv2.COLOR_YUV2BGR)
        return bgr_encoded

    def decode(self, bgr):
        (row, col, channels) = bgr.shape

        yuv = cv2.cvtColor(bgr, cv2.COLOR_BGR2YUV)

        scores = [[] for i in range(self._wmLen)]
        for channel in range(2):
            if self._scales[channel] <= 0:
                continue

            ca1,(h1,v1,d1) = pywt.dwt2(yuv[:row//4*4,:col//4*4,channel], 'haar')

            scores = self.decode_frame(ca1, self._scales[channel], scores)

        avgScores = list(map(lambda l: np.array(l).mean(), scores))

        bits = (np.array(avgScores) * 255 > 127)
        return bits

    def decode_frame(self, frame, scale, scores):
        (row, col) = frame.shape
        num = 0

        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]

                score = self.infer_dct_matrix(block, scale)
                #score = self.infer_dct_svd(block, scale)
                wmBit = num % self._wmLen
                scores[wmBit].append(score)
                num = num + 1

        return scores

    def diffuse_dct_svd(self, block, wmBit, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        s[0] = (s[0] // scale + 0.25 + 0.5 * wmBit) * scale
        return cv2.idct(np.dot(u, np.dot(np.diag(s), v)))

    def infer_dct_svd(self, block, scale):
        u,s,v = np.linalg.svd(cv2.dct(block))

        score = 0
        score = int((s[0] % scale) > scale * 0.5)
        return score
        if score >= 0.5:
            return 1.0
        else:
            return 0.0

    def diffuse_dct_matrix(self, block, wmBit, scale):
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block
        val = block[i][j]
        if val >= 0.0:
            block[i][j] = (val//scale + 0.25 + 0.5 * wmBit) * scale
        else:
            val = abs(val)
            block[i][j] = -1.0 * (val//scale + 0.25 + 0.5 * wmBit) * scale
        return block

    def infer_dct_matrix(self, block, scale):
        pos = np.argmax(abs(block.flatten()[1:])) + 1
        i, j = pos // self._block, pos % self._block

        val = block[i][j]
        if val < 0:
            val = abs(val)

        if (val % scale) > 0.5 * scale:
            return 1
        else:
            return 0

    def encode_frame(self, frame, scale):
        '''
        frame is a matrix (M, N)

        we get K (watermark bits size) blocks (self._block x self._block)

        For i-th block, we encode watermark[i] bit into it
        '''
        (row, col) = frame.shape
        num = 0
        for i in range(row//self._block):
            for j in range(col//self._block):
                block = frame[i*self._block : i*self._block + self._block,
                              j*self._block : j*self._block + self._block]
                wmBit = self._watermarks[(num % self._wmLen)]

                diffusedBlock = self.diffuse_dct_matrix(block, wmBit, scale)
                #diffusedBlock = self.diffuse_dct_svd(block, wmBit, scale)
                frame[i*self._block : i*self._block + self._block,
                      j*self._block : j*self._block + self._block] = diffusedBlock

                num = num+1

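EmbedMaxDct is the class behind the 'dwtDct' method name used elsewhere in these scripts: every block-by-block pass over the one-level DWT approximation carries one bit per block (block index modulo the watermark length), so each bit is voted on by many blocks at decode time. A quick, hypothetical sizing sketch for a 512x512 frame and a 32-bit watermark (all numbers illustrative):

# Hedged arithmetic sketch: how many blocks vote for each watermark bit.
height, width, block, wm_len = 512, 512, 4, 32   # illustrative numbers
ca_h, ca_w = height // 2, width // 2             # one-level Haar DWT approximation
blocks = (ca_h // block) * (ca_w // block)       # 64 * 64 = 4096 blocks per channel
votes_per_bit = blocks // wm_len                 # 128 votes per bit per channel
print(blocks, votes_per_bit)                     # 4096 128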
test_scripts/InvisibleWatermark_02/imwatermark/rivaGan.py
ADDED
@@ -0,0 +1,69 @@
import numpy as np
import torch
import cv2
import os
import time


class RivaWatermark(object):
    encoder = None
    decoder = None

    def __init__(self, watermarks=[], wmLen=32, threshold=0.52):
        self._watermarks = watermarks
        self._threshold = threshold
        if wmLen not in [32]:
            raise RuntimeError('rivaGan only supports 32 bits watermarks now.')
        self._data = torch.from_numpy(np.array([self._watermarks], dtype=np.float32))

    @classmethod
    def loadModel(cls):
        try:
            import onnxruntime
        except ImportError:
            raise ImportError(
                "The `RivaWatermark` class requires onnxruntime to be installed. "
                "You can install it with pip: `pip install onnxruntime`."
            )

        if RivaWatermark.encoder and RivaWatermark.decoder:
            return
        modelDir = os.path.dirname(os.path.abspath(__file__))
        RivaWatermark.encoder = onnxruntime.InferenceSession(
            os.path.join(modelDir, 'rivagan_encoder.onnx'))
        RivaWatermark.decoder = onnxruntime.InferenceSession(
            os.path.join(modelDir, 'rivagan_decoder.onnx'))

    def encode(self, frame):
        if not RivaWatermark.encoder:
            raise RuntimeError('call loadModel method first')

        frame = torch.from_numpy(np.array([frame], dtype=np.float32)) / 127.5 - 1.0
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0)

        inputs = {
            'frame': frame.detach().cpu().numpy(),
            'data': self._data.detach().cpu().numpy()
        }

        outputs = RivaWatermark.encoder.run(None, inputs)
        wm_frame = outputs[0]
        wm_frame = torch.clamp(torch.from_numpy(wm_frame), min=-1.0, max=1.0)
        wm_frame = (
            (wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5
        ).detach().cpu().numpy().astype('uint8')

        return wm_frame

    def decode(self, frame):
        if not RivaWatermark.decoder:
            raise RuntimeError('you need load model first')

        frame = torch.from_numpy(np.array([frame], dtype=np.float32)) / 127.5 - 1.0
        frame = frame.permute(3, 0, 1, 2).unsqueeze(0)
        inputs = {
            'frame': frame.detach().cpu().numpy(),
        }
        outputs = RivaWatermark.decoder.run(None, inputs)
        data = outputs[0][0]
        return np.array(data > self._threshold, dtype=np.uint8)

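RivaWatermark wraps the two ONNX models added below as Git LFS files; it only accepts 32-bit payloads, and the shared sessions must be loaded once before any encode or decode call. A hedged usage sketch, assuming onnxruntime is installed and the model files sit next to rivaGan.py (the frame shape and contents are illustrative placeholders):

# Hedged sketch: requires onnxruntime plus the rivagan_*.onnx files next to rivaGan.py.
import numpy as np
from imwatermark.rivaGan import RivaWatermark

RivaWatermark.loadModel()                         # populates the shared ONNX sessions
bits = [1, 0] * 16                                # exactly 32 bits are supported
rw = RivaWatermark(bits, wmLen=32)

frame = np.zeros((256, 256, 3), dtype=np.uint8)   # placeholder BGR frame
wm_frame = rw.encode(frame)                       # watermarked uint8 frame of the same size
decoded = RivaWatermark(bits, wmLen=32).decode(wm_frame)   # 32 recovered bits as a uint8 array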
test_scripts/InvisibleWatermark_02/imwatermark/rivagan_decoder.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b006690cf352fa50a91a0b6d6241dbcded0a9957969e2c0665cf9a7011b3880
size 1088816

test_scripts/InvisibleWatermark_02/imwatermark/rivagan_encoder.onnx
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66bc7814fadac7d686105c8ab26c6cf2cc8e40deb83e67bb6ef5b9de9b8ece0d
size 657164

test_scripts/InvisibleWatermark_02/imwatermark/watermark.py
ADDED
@@ -0,0 +1,171 @@
import struct
import uuid
import copy
import base64
import cv2
import numpy as np
from .maxDct import EmbedMaxDct
from .dwtDctSvd import EmbedDwtDctSvd
from .rivaGan import RivaWatermark
import pprint

pp = pprint.PrettyPrinter(indent=2)

class WatermarkEncoder(object):
    def __init__(self, content=b''):
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)
        self._wmType = 'bytes'

    def set_by_ipv4(self, addr):
        bits = []
        ips = addr.split('.')
        for ip in ips:
            bits += list(np.unpackbits(np.array([ip % 255], dtype=np.uint8)))
        self._watermarks = bits
        self._wmLen = len(self._watermarks)
        self._wmType = 'ipv4'
        assert self._wmLen == 32

    def set_by_uuid(self, uid):
        u = uuid.UUID(uid)
        self._wmType = 'uuid'
        seq = np.array([n for n in u.bytes], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_bytes(self, content):
        self._wmType = 'bytes'
        seq = np.array([n for n in content], dtype=np.uint8)
        self._watermarks = list(np.unpackbits(seq))
        self._wmLen = len(self._watermarks)

    def set_by_b16(self, b16):
        content = base64.b16decode(b16)
        self.set_by_bytes(content)
        self._wmType = 'b16'

    def set_by_bits(self, bits=[]):
        self._watermarks = [int(bit) % 2 for bit in bits]
        self._wmLen = len(self._watermarks)
        self._wmType = 'bits'

    def set_watermark(self, wmType='bytes', content=''):
        if wmType == 'ipv4':
            self.set_by_ipv4(content)
        elif wmType == 'uuid':
            self.set_by_uuid(content)
        elif wmType == 'bits':
            self.set_by_bits(content)
        elif wmType == 'bytes':
            self.set_by_bytes(content)
        elif wmType == 'b16':
            self.set_by_b16(content)
        else:
            raise NameError('%s is not supported' % wmType)

    def get_length(self):
        return self._wmLen

    @classmethod
    def loadModel(cls):
        RivaWatermark.loadModel()

    def encode(self, cv2Image, method='dwtDct', **configs):
        (r, c, channels) = cv2Image.shape
        if r*c < 256*256:
            raise RuntimeError('image too small, should be larger than 256x256')

        if method == 'dwtDct':
            embed = EmbedMaxDct(self._watermarks, wmLen=self._wmLen, **configs)
            return embed.encode(cv2Image)
        elif method == 'dwtDctSvd':
            embed = EmbedDwtDctSvd(self._watermarks, wmLen=self._wmLen, **configs)
            return embed.encode(cv2Image)
        elif method == 'rivaGan':
            embed = RivaWatermark(self._watermarks, self._wmLen)
            return embed.encode(cv2Image)
        else:
            raise NameError('%s is not supported' % method)

class WatermarkDecoder(object):
    def __init__(self, wm_type='bytes', length=0):
        self._wmType = wm_type
        if wm_type == 'ipv4':
            self._wmLen = 32
        elif wm_type == 'uuid':
            self._wmLen = 128
        elif wm_type == 'bytes':
            self._wmLen = length
        elif wm_type == 'bits':
            self._wmLen = length
        elif wm_type == 'b16':
            self._wmLen = length
        else:
            raise NameError('%s is unsupported' % wm_type)

    def reconstruct_ipv4(self, bits):
        ips = [str(ip) for ip in list(np.packbits(bits))]
        return '.'.join(ips)

    def reconstruct_uuid(self, bits):
        nums = np.packbits(bits)
        bstr = b''
        for i in range(16):
            bstr += struct.pack('>B', nums[i])

        return str(uuid.UUID(bytes=bstr))

    def reconstruct_bits(self, bits):
        #return ''.join([str(b) for b in bits])
        return bits

    def reconstruct_b16(self, bits):
        bstr = self.reconstruct_bytes(bits)
        return base64.b16encode(bstr)

    def reconstruct_bytes(self, bits):
        nums = np.packbits(bits)
        bstr = b''
        for i in range(self._wmLen//8):
            bstr += struct.pack('>B', nums[i])
        return bstr

    def reconstruct(self, bits):
        if len(bits) != self._wmLen:
            raise RuntimeError('bits are not matched with watermark length')

        if self._wmType == 'ipv4':
            return self.reconstruct_ipv4(bits)
        elif self._wmType == 'uuid':
            return self.reconstruct_uuid(bits)
        elif self._wmType == 'bits':
            return self.reconstruct_bits(bits)
        elif self._wmType == 'b16':
            return self.reconstruct_b16(bits)
        else:
            return self.reconstruct_bytes(bits)

    def decode(self, cv2Image, method='dwtDct', **configs):
        (r, c, channels) = cv2Image.shape
        if r*c < 256*256:
            raise RuntimeError('image too small, should be larger than 256x256')

        bits = []
        if method == 'dwtDct':
            embed = EmbedMaxDct(watermarks=[], wmLen=self._wmLen, **configs)
            bits = embed.decode(cv2Image)
        elif method == 'dwtDctSvd':
            embed = EmbedDwtDctSvd(watermarks=[], wmLen=self._wmLen, **configs)
            bits = embed.decode(cv2Image)
        elif method == 'rivaGan':
            embed = RivaWatermark(watermarks=[], wmLen=self._wmLen, **configs)
            bits = embed.decode(cv2Image)
        else:
            raise NameError('%s is not supported' % method)
        return self.reconstruct(bits)

    @classmethod
    def loadModel(cls):
        RivaWatermark.loadModel()

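Putting the pieces together, the encoder/decoder pair above is normally used as a round trip: pack a payload into bits, embed with one method name, then decode with the same method and the same bit length. A minimal, hedged round-trip sketch (the file names and 'test' payload are illustrative):

# Hedged end-to-end sketch using the classes defined above.
import cv2
from imwatermark import WatermarkEncoder, WatermarkDecoder

payload = b'test'                                   # 4 bytes -> 32 bits
bgr = cv2.imread('input.png')                       # assumed to be at least 256x256 pixels

encoder = WatermarkEncoder()
encoder.set_watermark('bytes', payload)
bgr_encoded = encoder.encode(bgr, 'dwtDct')         # EmbedMaxDct under the hood
cv2.imwrite('output.png', bgr_encoded)

decoder = WatermarkDecoder('bytes', 32)             # length is given in bits
recovered = decoder.decode(cv2.imread('output.png'), 'dwtDct')
print(recovered.decode('utf-8', errors='ignore'))   # expected to print 'test' on typical images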
test_scripts/InvisibleWatermark_02/test_script.py
ADDED
@@ -0,0 +1,83 @@
import argparse
import os
import json
from datetime import datetime
from io import StringIO
import sys

def test_watermark_extraction(extracted_txt_path, ground_truth_path):
    comments = []
    process_success = True
    result_success = False

    try:
        for path, label in [(extracted_txt_path, "Extracted result"), (ground_truth_path, "Ground Truth")]:
            if not os.path.exists(path):
                comments.append(f"{label} file does not exist: {path}")
                process_success = False
            elif os.path.getsize(path) == 0:
                comments.append(f"{label} file is empty: {path}")
                process_success = False

        if not process_success:
            return process_success, result_success, "\n".join(comments)

        with open(extracted_txt_path, 'r', encoding='utf-8') as f:
            extracted_watermark = f.read().strip()

        with open(ground_truth_path, 'r', encoding='utf-8') as f:
            ground_truth = f.read().strip()

        if extracted_watermark == ground_truth:
            result_success = True
            comments.append("✅ Watermark extraction successful, exact match!")
        else:
            comments.append("❌ Watermark extraction failed, result does not match Ground Truth")
            comments.append(f"Extracted watermark: {extracted_watermark}")
            comments.append(f"Ground Truth: {ground_truth}")

    except Exception as e:
        process_success = False
        comments.append(f"System exception: {str(e)}")

    return process_success, result_success, "\n".join(comments)

def save_result_jsonl(result_path, process_flag, result_flag, comments_text):
    record = {
        "Process": process_flag,
        "Result": result_flag,
        "TimePoint": datetime.now().isoformat(timespec="seconds"),
        "comments": comments_text
    }

    try:
        with open(result_path, 'a', encoding='utf-8') as f:
            json.dump(record, f, ensure_ascii=False)
            f.write('\n')
        print(f"[✅] Successfully wrote to JSONL: {result_path}")
    except Exception as e:
        print(f"[❌] Failed to write to JSONL: {e}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Evaluate watermark extraction accuracy (exact match)")
    parser.add_argument("--output", required=True, help="Path to extracted watermark text file")
    parser.add_argument("--groundtruth", required=True, help="Path to ground truth text file")
    parser.add_argument("--result", required=False, help="JSONL output path")
    args = parser.parse_args()

    original_stdout = sys.stdout
    buffer = StringIO()
    sys.stdout = buffer

    process_flag, result_flag, comments_text = test_watermark_extraction(
        args.output, args.groundtruth
    )

    sys.stdout = original_stdout
    captured_output = buffer.getvalue()
    full_comments = f"{comments_text}\n{captured_output.strip()}"

    if args.result:
        save_result_jsonl(args.result, process_flag, result_flag, full_comments)

    print(full_comments)

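This variant compares two plain-text files instead of decoding an image, so it can be exercised without any watermarking at all. A small, hypothetical smoke test (the file names and payload string are illustrative):

# Hedged sketch: exercise test_watermark_extraction directly with two temporary files.
from test_script import test_watermark_extraction

with open('extracted.txt', 'w', encoding='utf-8') as f:
    f.write('SampleWM\n')
with open('groundtruth.txt', 'w', encoding='utf-8') as f:
    f.write('SampleWM\n')

ok_process, ok_result, comments = test_watermark_extraction('extracted.txt', 'groundtruth.txt')
print(ok_process, ok_result)   # True True, since both files strip to the same string
print(comments)

# Equivalent CLI run:
#   python test_script.py --output extracted.txt --groundtruth groundtruth.txt --result results.jsonl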
test_scripts/InvisibleWatermark_03/imwatermark/__init__.py
ADDED
@@ -0,0 +1 @@
from .watermark import WatermarkEncoder, WatermarkDecoder

test_scripts/InvisibleWatermark_03/imwatermark/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (350 Bytes).

test_scripts/InvisibleWatermark_03/imwatermark/__pycache__/__init__.cpython-312.pyc
ADDED
Binary file (284 Bytes).

test_scripts/InvisibleWatermark_03/imwatermark/__pycache__/__init__.cpython-37.pyc
ADDED
Binary file (252 Bytes).