python将红底证件照转成蓝底

前言

emmm…快开学了,手头只有红底证件照,但是学院要求要蓝底,这可咋办呢。懒得下ps了。自己撸起来吧。
方法一: lableme 0 o( w- ?2 Y2 ^9 g& e) c1 m1 f) f- [lableme标注完后。得到一个json文件,然后将这种json文件转成掩码图. & Z+ Z1 f7 c2 N- i9 C1 b ) V1 X6 N- Q% j; i' h, \# 代码来自 https://blog.csdn.net/hello_dear_you/article/details/120130155 0 e2 R/ e1 P0 S7 I! Y* K8 ^ g8 cimport json a) X& S" B5 b$ O( b' l# Simport numpy as np1 n- j) _3 w7 h0 \" D) |
import cv22 v1 F, H+ K% O* Q
# read json file 4 o, U* Q1 x' E2 Z( Gwith open("origin_json/mypic.json", "r") as f:$ `7 g. g& ^7 |4 }5 g% X8 s- b
data = f.read()' }1 V$ L' n7 N9 i
G$ W& c6 O Q% a! y6 [
# convert str to json objs! t6 D; n" P4 ~0 h7 f3 D
data = json.loads(data) + |% x$ r! N4 q/ L% s, V7 |1 g& U4 e2 |! d0 |' S+ X1 p7 j
# get the points / B* P l6 A8 |' F
points = data["shapes"][0]["points"] 6 Q! t9 t3 \# U: rpoints = np.array(points, dtype=np.int32) # tips: points location must be int32: [' u" X) }& E: y! E
4 c7 L/ o6 h' i5 \8 z# read image to get shape * z9 V) S6 |" m; Z# P4 c4 A) K" K) limage = cv2.imread("origin_png/person.jpg"): ^0 A0 ?6 X+ t; t2 U
6 ?7 P( y; A; E0 i. [8 Q! Z, G2 W
# create a blank image & K3 e; L5 w1 q0 Z- Lmask = np.zeros_like(image, dtype=np.uint8) & `- q$ T d5 R3 z; @+ P % d: g& V! H+ g9 }" g o. d. q# fill the contour with 255! y3 H. P# p% n% U2 O% O q, F5 T
cv2.fillPoly(mask, [points], (255, 255, 255))% |3 f( P: o6 U, o: b6 C
2 m8 T1 f" a7 ?# save the mask % s- w& q" d/ K' }6 k7 b/ D0 d' V
cv2.imwrite("mask/person_mask.png", mask) . o8 R. k6 S1 ~$ M2 s b0 h5 N1/ Z! A: @, T! S O
2 ' y u) D( E) I2 K) e c- Z0 M; W3 5 G% E2 G1 [ x. a& C3 D* Y- B4& n3 C2 Q* l# g0 L/ e
5- I9 w5 Q5 Z3 I7 c- \+ o
62 R7 A" O$ Y+ H* f7 l
7 # P# i4 g1 s6 X+ U& \/ k; Q8 * }/ u6 }( t; `1 F P9* j3 k0 h; v- O7 V
10 2 {3 o0 g: }; J. w: z3 X114 l1 M* P; I# t1 j9 u/ q
127 Z7 E, I7 b7 R3 F
13 ) H" c4 I9 }3 E! x14 ! M# j4 c9 j- v9 p' l2 Z! J154 M) l* k9 S- N+ i1 \- {4 V1 Y2 m
16( L) k1 a% Q' Q. H; e9 }3 l
17 : c5 x/ n( q- B5 N8 C. r& @18 6 O. B* K- t+ X0 p6 |19$ o% }/ a6 b) x' x0 E* M6 J
20 3 p5 |* Y8 n* T9 @8 z21 # f1 o* q) x2 S' i226 |5 K( d1 L" E9 f k) |, U! K4 X
230 D# _! z" j1 o7 ]- z" F+ A
242 @5 j( p2 g8 W# v
25 F" ~* U% S5 x- u3 h: }8 o26: i4 }5 }5 W+ H; G8 K
大概是这样:

然后利用这个mask生成图片
# 参考自: https://www.jianshu.com/p/1961aa0c02ee9 z/ |9 J5 h# C0 t2 D9 K+ f9 M1 N
import cv2 6 v1 a7 j2 d" i. Q5 u3 Z |8 aimport numpy as np ( r$ L* ]! i2 b- g* X" U7 D& @5 f6 M& N2 W- o8 y
1 _8 X8 Z* j4 l2 O' q- s# d& \! V
origin_png = 'origin_png/person.jpg' 8 z* Q5 M: `7 J$ |; p, U# maskPath = 'mask/person_mask.png' ; O, V' {2 J0 w$ YmaskPath = 'mask/bmv2.png', z. l" A7 N7 O1 C# c ?
result_png = 'result_png/result_png.png' % x- u. K7 ?" a4 Y& l; g% V3 V 1 q6 [1 r r4 s' ^4 f* T5 W" O: X8 Y ' K7 d3 @5 g5 q8 h. U" n+ d$ KmaskImg = cv2.imread(maskPath)9 N, `9 |: H$ B- c" q
img = cv2.imread(origin_png)' L% u! `' Y" w ]3 C- C6 o
assert maskImg.shape == img.shape, 'maskImg.shape != origin_png.shape' z/ ^1 E( i6 j& t8 B- \ ( Z! m/ d* E% [5 o) p1 H; p2 mh, w = img.shape[0], img.shape[1]. ~2 f A( E+ W
print('图片宽度: {}, 高度: {}'.format(h, w)) , {2 J W% F" u5 m" p, ^ " K& ~; h; l4 nrgb = (19,122,171) ; B8 _8 {1 L; n1 Pbgr = (rgb[2], rgb[1], rgb[0]) 9 i; p! h* ^1 }2 M) G- |& F# _# (B, G, R) % v' j3 T$ ~, Y% f5 ? hfor i in range(h):# b/ K/ T0 y8 V4 H, e! Y
for j in range(w):) \! d8 }2 ?6 Q% i2 ?
if (maskImg[i, j] == 0).all():2 }' U8 l2 g( C3 f% i8 Q
img[i, j] = bgr" Z0 p- J8 [) ~1 d3 `8 h: g
cv2.imwrite(result_png, img) 2 _0 m9 T* k8 n0 h, Iprint('图片写入 {} 成功'.format(result_png)) % w- f6 P+ J0 c. X1 : I6 R# S, {( s0 O2 v% z: n& e2! s8 Y* K( ~) Z3 q7 j. O$ E) \$ A. P
3 * X1 `/ R) \ X4* b: Y3 `3 L0 P- z! U% J; @
5 . h2 r3 T6 Q- N# i- c3 l6 W* n* L* [2 D& W( S k [7 2 J: k/ z7 G0 }* t1 N8 ' S% }# J* j; R- M Q/ C9% f3 U' K1 h, @! G
10 * B; M2 u! }% P @, c3 W$ M8 ^112 n, M6 ^" w: ]- G
12# U D/ ]6 @8 h) b% F$ _
13( }% o; {/ l+ i2 j( F
14. m/ u; v% G& I+ f; a* x! z
15 2 t$ n5 `! w: c- m$ a16 ! Q: Q6 l! f" z) x- ~17; ]1 x" E1 m7 ?3 |7 q) ^
18# F$ J% }( g2 ]
199 k) i" Y) A5 H+ r' Y
204 Q% @( M% X" Z: A, a. P
21, \% }; |' L( G$ c- r
22 k @% O5 v9 d) `231 M# e- V4 x" P" q2 _/ @8 J' S( ]( |
24; J0 ~- g. c- E' D
25 , j3 r* J' ^. @ o) C26: {' F1 G: T; N& q6 v# [) r0 J
由于人长得一般,就不放图了…

缺点:
labelme标注时挺费力,并且难以避免人与背景边缘会有残留红色像素的情况。
方法二: 阈值

该方法通过比较像素的RGB与背景的RGB来区分是否为图像背景。

OpenCV

import cv2
import numpy as np
def mean_square_loss(a_np, b_np): 8 U9 |# u% w9 i7 @ d! l sl = np.square(a_np - b_np) 2 r( p) i, @$ i8 j' F return np.mean(sl) ; U5 r: }. F% j, r : j4 T$ x' C# { ]5 k7 f8 k: ?6 O0 O5 M, u" Y3 Y7 R
def change_red2blue(origin_png, result_png): 0 Q/ a8 |. Q k1 T3 T! c img = cv2.imread(origin_png)9 z& q+ p$ O( b) U
& w$ s3 I. ?5 G& }$ ^% `& j
h, w = img.shape[0], img.shape[1] 6 t- T9 h9 d+ j print('图片宽度: {}, 高度: {}'.format(h, w)) - K8 Z p. M# U6 X! a7 x& u5 ~2 t W& \
origin_rgb = (168,36,32) # 可以用浏览器啥的控制台工具提取出背景的rgb值9 Z" `( e# y1 X! d$ d
origin_bgr = (origin_rgb[2], origin_rgb[1], origin_rgb[0]) 7 E; A" @- ?/ w" u/ r P& |2 U target_rgb = (19,122,171) # 蓝底RBG- m4 t2 e& i1 m/ I. W: c. S) J6 n
target_bgr = (target_rgb[2], target_rgb[1], target_rgb[0]) $ h6 k( \3 f$ ^; H; U5 p( j - h1 R: n d# f, a' J6 l for i in range(h):8 a9 L/ c# f. p# y& v$ `2 s
for j in range(w): c: b% N6 X" [: y # (B, G, R) G% c$ S* r. m0 m: I
if mean_square_loss(img[i, j], origin_bgr) < 50: 6 _ L* e9 J$ ?2 n8 X9 z img[i, j] = target_bgr 3 _2 {. R, R& O
6 P, \6 x$ x4 ?; m- p
cv2.imwrite(result_png, img)9 i9 T+ A3 r5 s
print('图片写入 {} 成功'.format(result_png))) d# s2 E. t* h) @4 P+ o8 F) o; s/ i
( v4 X' [8 L9 ?, c0 ^# Q6 } r% A5 R; {% U
if __name__ == '__main__': ~( c, g5 K) V% ?3 q" }0 C/ H5 f
# origin_png = 'result_png/result_png.png' 3 _9 p7 z( c+ f1 z; @ origin_png = 'origin_png/person.jpg'; T5 B5 H$ o! S" o, q3 P
result_png = 'result_png/result_refine.png' / u8 R% x7 I9 F1 C change_red2blue(origin_png, result_png) X. t; w1 k9 m* C3 c1 0 I; Z! {4 A5 N) N, G2 n) M2% x7 x+ k/ c9 g* B4 a7 w
3 & b4 f* ~+ B* p: J r4 m4 Z4 / G# o, h& Z, v w' _5, @/ K5 l- I; u+ H
6# ?! k, v% }4 H! K; x$ M1 D
7 }; ?5 ]6 A+ j$ h
80 x2 Q2 X. P) F& q
9 1 G4 s9 H2 D& M2 ~10 / \, z8 D" V7 }$ U# Z; V116 Z* Q! v. y4 R* k6 d2 Q2 ]0 r
12) J. w9 _0 ]0 l) b. D
139 }" y* k( o2 d5 l9 h
14- t3 _# F1 r9 H s' f$ n5 ?8 ?
15! {0 h# q M: s
16 + K: D/ L+ d% M, r17 ! K, c0 W; H% m7 I( I18 4 V/ o9 _ L; U0 Q9 F, q/ ]198 U- a) T* U; U
20+ m3 s2 `& g- A
21 : J/ a3 }" x0 W" Q' O22 + l0 |( B: e5 A' G5 q230 f9 u% [" K7 w& W% }& T5 O
24 % r: [% v1 L3 V4 Q! K+ H25 - ]& {7 A7 B( r) \26 6 D( r) ~3 K" M) ^27 8 `( L9 x$ {' I284 S( h- T8 Q+ Z' X+ M( z( h
29 2 V& @+ L4 c, A* V30 % Y# V- ~5 j* c313 O4 J" N7 _* l3 |
32 & a8 Z# K: _1 i6 g6 a5 E. q331 Y# u, R- ^* V! i% T& H' s
34( d4 I" B9 N5 Y
结果人与背景边缘仍会存在红色像素残留

PIL:
from torchvision.transforms.functional import to_tensor, to_pil_image" C$ }$ g3 v, m& ^# S9 y
from PIL import Image( y7 O* J% G; h2 i# c
import torch* h5 e/ N& n1 H7 F
import time 8 f& w0 X' O5 p" F; I; {; E; d * O, M! C) V; ?% a) p. w2 V ' r" |" g1 ~! v: u& Adef mean_square_loss(a_ts, b_ts):; v' n# x6 p. q+ `. F! @
# print(a_ts.shape) 7 k, ` P+ t: Z/ O% ]4 Z # print(b_ts)! r, p$ E* ~. D1 t, t$ d
sl = (a_ts - b_ts) ** 2 0 I) S% |4 f* R3 x+ d return sl.sum() f o" q+ i3 x* z" N