A Beauty Camera Built with OpenCV: A Look at Beauty-Filter Effects
Introduction

Every time we go out now, my girlfriend wants photos taken! BUT she complains that my angles are wrong, the lighting is bad...

In short, a whole pile of complaints. If only my photography could reach even a third of what a beauty camera can do!

Practice does pay off, though: by now I can at least tell which ladies on my feed have retouched their photos.

No escaping it.

There really is no escaping it, so to get out of this situation I quickly wrote a simple beauty camera for my girlfriend to use on the quiet. That way I no longer have to worry about taking the blame. Clever me.jpg
Main Content
環(huán)境安裝:
Installing dlib. This post lists three ways to install it:
Method 1: pip install dlib. This requires cmake and Boost to already be installed on your machine.
Method 2: conda install -c menpo dlib=18.18. Suitable for machines that already have a conda environment set up.
Method 3: install from a prebuilt wheel, e.g. pip install dlib-19.8.1-cp36-cp36m-win_amd64.whl (other prebuilt files circulate as well, such as dlib-19.7.0-cp36-cp36m-win_amd64.rar and dlib-19.3.1-cp35-cp35m-win_amd64.whl).
Installing cv2: pip install opencv-python
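To double-check that the environment is ready, you can run a quick import test like the sketch below (this check is my addition, not part of the original project):

# Quick sanity check (not from the original project): make sure dlib and OpenCV import.
import dlib
import cv2

print('dlib version:', dlib.__version__)
print('OpenCV version:', cv2.__version__)

# The 68-point landmark model file (shape_predictor_68_face_landmarks.dat) still has to be
# downloaded separately before the Makeup class further down can run.
detector = dlib.get_frontal_face_detector()   # should construct without raising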
First comes the class that models a single facial feature: it records the landmark coordinates, crops the local image patch, and builds a Gaussian-blurred mask, among other things.
import cv2
import dlib
import numpy as np


# Facial feature (organ)
class Organ():
    def __init__(self, img, img_hsv, temp_img, temp_hsv, landmarks, name, ksize=None):
        self.img = img
        self.img_hsv = img_hsv
        self.landmarks = landmarks
        self.name = name
        self.get_rect()
        self.shape = (int(self.bottom-self.top), int(self.right-self.left))
        self.size = self.shape[0] * self.shape[1] * 3
        self.move = int(np.sqrt(self.size/3)/20)
        self.ksize = self.get_ksize()
        self.patch_img, self.patch_hsv = self.get_patch(self.img), self.get_patch(self.img_hsv)
        self.set_temp(temp_img, temp_hsv)
        self.patch_mask = self.get_mask_relative()

    # Get the bounding rectangle of the landmarks
    def get_rect(self):
        y, x = self.landmarks[:, 1], self.landmarks[:, 0]
        self.top, self.bottom, self.left, self.right = np.min(y), np.max(y), np.min(x), np.max(x)

    # Get ksize, the Gaussian blur kernel size
    def get_ksize(self, rate=15):
        size = max([int(np.sqrt(self.size/3)/rate), 1])
        size = (size if size % 2 == 1 else size+1)
        return (size, size)

    # Crop the local patch (with a margin of self.move pixels)
    def get_patch(self, img):
        shape = img.shape
        return img[np.max([self.top-self.move, 0]): np.min([self.bottom+self.move, shape[0]]),
                   np.max([self.left-self.move, 0]): np.min([self.right+self.move, shape[1]])]

    def set_temp(self, temp_img, temp_hsv):
        self.img_temp, self.hsv_temp = temp_img, temp_hsv
        self.patch_img_temp, self.patch_hsv_temp = self.get_patch(self.img_temp), self.get_patch(self.hsv_temp)

    # Commit: copy the temp buffers into the working image
    def confirm(self):
        self.img[:], self.img_hsv[:] = self.img_temp[:], self.hsv_temp[:]

    # Update: copy the working image into the temp buffers
    def update_temp(self):
        self.img_temp[:], self.hsv_temp[:] = self.img[:], self.img_hsv[:]

    # Fill the convex hull of the given points
    def _draw_convex_hull(self, img, points, color):
        points = cv2.convexHull(points)
        cv2.fillConvexPoly(img, points, color=color)

    # Get the mask in local (patch-relative) coordinates
    def get_mask_relative(self, ksize=None):
        if ksize is None:
            ksize = self.ksize
        landmarks_re = self.landmarks.copy()
        landmarks_re[:, 1] -= np.max([self.top-self.move, 0])
        landmarks_re[:, 0] -= np.max([self.left-self.move, 0])
        mask = np.zeros(self.patch_img.shape[:2], dtype=np.float64)
        self._draw_convex_hull(mask, landmarks_re, color=1)
        mask = np.array([mask, mask, mask]).transpose((1, 2, 0))
        mask = (cv2.GaussianBlur(mask, ksize, 0) > 0) * 1.0
        return cv2.GaussianBlur(mask, ksize, 0)[:]

    # Get the mask in global (absolute) coordinates
    def get_mask_abs(self, ksize=None):
        if ksize is None:
            ksize = self.ksize
        mask = np.zeros(self.img.shape, dtype=np.float64)
        patch = self.get_patch(mask)
        patch[:] = self.patch_mask[:]
        return mask
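To get a feel for the get_ksize() and move formulas, here is a tiny worked example with made-up numbers (a roughly 60x90 pixel feature region; the values are hypothetical, not taken from the post):

# Worked example with hypothetical numbers, mirroring Organ.get_ksize() and Organ.move.
import numpy as np

height, width = 60, 90                      # example bounding-box size in pixels
size = height * width * 3                   # 16200, as computed in Organ.__init__
k = max(int(np.sqrt(size / 3) / 15), 1)     # int(73.5 / 15) = 4
k = k if k % 2 == 1 else k + 1              # Gaussian kernels must be odd -> 5
move = int(np.sqrt(size / 3) / 20)          # patch margin -> 3 pixels
print((k, k), move)                         # (5, 5) 3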
The main beauty effects are implemented as the following Organ methods:
    # (The methods below continue the Organ class defined above.)

    # Whitening: brighten the V channel inside the feature mask
    def whitening(self, rate=0.15, confirm=True):
        if confirm:
            self.confirm()
            self.patch_hsv[:, :, -1] = np.minimum(self.patch_hsv[:, :, -1] + self.patch_hsv[:, :, -1]*self.patch_mask[:, :, -1]*rate, 255).astype('uint8')
            self.img[:] = cv2.cvtColor(self.img_hsv, cv2.COLOR_HSV2BGR)[:]
            self.update_temp()
        else:
            self.patch_hsv_temp[:] = cv2.cvtColor(self.patch_img_temp, cv2.COLOR_BGR2HSV)[:]
            self.patch_hsv_temp[:, :, -1] = np.minimum(self.patch_hsv_temp[:, :, -1] + self.patch_hsv_temp[:, :, -1]*self.patch_mask[:, :, -1]*rate, 255).astype('uint8')
            self.patch_img_temp[:] = cv2.cvtColor(self.patch_hsv_temp, cv2.COLOR_HSV2BGR)[:]
    # Brightening: increase the saturation (S channel) inside the feature mask
    def brightening(self, rate=0.3, confirm=True):
        patch_mask = self.get_mask_relative((1, 1))
        if confirm:
            self.confirm()
            patch_new = self.patch_hsv[:, :, 1]*patch_mask[:, :, 1]*rate
            patch_new = cv2.GaussianBlur(patch_new, (3, 3), 0)
            self.patch_hsv[:, :, 1] = np.minimum(self.patch_hsv[:, :, 1]+patch_new, 255).astype('uint8')
            self.img[:] = cv2.cvtColor(self.img_hsv, cv2.COLOR_HSV2BGR)[:]
            self.update_temp()
        else:
            self.patch_hsv_temp[:] = cv2.cvtColor(self.patch_img_temp, cv2.COLOR_BGR2HSV)[:]
            patch_new = self.patch_hsv_temp[:, :, 1]*patch_mask[:, :, 1]*rate
            patch_new = cv2.GaussianBlur(patch_new, (3, 3), 0)
            self.patch_hsv_temp[:, :, 1] = np.minimum(self.patch_hsv_temp[:, :, 1]+patch_new, 255).astype('uint8')
            self.patch_img_temp[:] = cv2.cvtColor(self.patch_hsv_temp, cv2.COLOR_HSV2BGR)[:]
    # Smoothing: blur the skin inside the mask and blend it back in
    def smooth(self, rate=0.6, ksize=None, confirm=True):
        if ksize is None:
            ksize = self.get_ksize(80)
        index = self.patch_mask > 0
        if confirm:
            self.confirm()
            patch_new = cv2.GaussianBlur(cv2.bilateralFilter(self.patch_img, 3, *ksize), ksize, 0)
            self.patch_img[index] = np.minimum(rate*patch_new[index]+(1-rate)*self.patch_img[index], 255).astype('uint8')
            self.img_hsv[:] = cv2.cvtColor(self.img, cv2.COLOR_BGR2HSV)[:]
            self.update_temp()
        else:
            patch_new = cv2.GaussianBlur(cv2.bilateralFilter(self.patch_img_temp, 3, *ksize), ksize, 0)
            self.patch_img_temp[index] = np.minimum(rate*patch_new[index]+(1-rate)*self.patch_img_temp[index], 255).astype('uint8')
            self.patch_hsv_temp[:] = cv2.cvtColor(self.patch_img_temp, cv2.COLOR_BGR2HSV)[:]
    # Sharpening: apply an unsharp-mask style kernel inside the mask
    def sharpen(self, rate=0.3, confirm=True):
        patch_mask = self.get_mask_relative((3, 3))
        kernel = np.zeros((9, 9), np.float32)
        kernel[4, 4] = 2.0
        boxFilter = np.ones((9, 9), np.float32) / 81.0
        kernel = kernel - boxFilter
        index = patch_mask > 0
        if confirm:
            self.confirm()
            sharp = cv2.filter2D(self.patch_img, -1, kernel)
            self.patch_img[index] = np.minimum(((1-rate)*self.patch_img)[index]+sharp[index]*rate, 255).astype('uint8')
            self.update_temp()
        else:
            sharp = cv2.filter2D(self.patch_img_temp, -1, kernel)
            self.patch_img_temp[:] = np.minimum(self.patch_img_temp+self.patch_mask*sharp*rate, 255).astype('uint8')
            self.patch_hsv_temp[:] = cv2.cvtColor(self.patch_img_temp, cv2.COLOR_BGR2HSV)[:]
# Forehead
class ForeHead(Organ):
    def __init__(self, img, img_hsv, temp_img, temp_hsv, landmarks, mask_organs, name, ksize=None):
        self.mask_organs = mask_organs
        super(ForeHead, self).__init__(img, img_hsv, temp_img, temp_hsv, landmarks, name, ksize)

    # Get the mask in local (patch-relative) coordinates, excluding the other facial features
    def get_mask_relative(self, ksize=None):
        if ksize is None:
            ksize = self.ksize
        landmarks_re = self.landmarks.copy()
        landmarks_re[:, 1] -= np.max([self.top-self.move, 0])
        landmarks_re[:, 0] -= np.max([self.left-self.move, 0])
        mask = np.zeros(self.patch_img.shape[:2], dtype=np.float64)
        self._draw_convex_hull(mask, landmarks_re, color=1)
        mask = np.array([mask, mask, mask]).transpose((1, 2, 0))
        mask = (cv2.GaussianBlur(mask, ksize, 0) > 0) * 1.0
        patch_organs = self.get_patch(self.mask_organs)
        mask = cv2.GaussianBlur(mask, ksize, 0)[:]
        mask[patch_organs > 0] = (1 - patch_organs[patch_organs > 0])
        return mask
# Face
class Face(Organ):
    def __init__(self, img, img_hsv, temp_img, temp_hsv, landmarks, index):
        self.index = index
        # Facial features: jaw, mouth, nose, left/right eyes, left/right brows
        self.organs_name = ['jaw', 'mouth', 'nose', 'left_eye', 'right_eye', 'left_brow', 'right_brow']
        # Landmark indices for each feature (dlib 68-point model)
        self.organs_point = [list(range(0, 17)), list(range(48, 61)),
                             list(range(27, 35)), list(range(42, 48)),
                             list(range(36, 42)), list(range(22, 27)),
                             list(range(17, 22))]
        self.organs = {name: Organ(img, img_hsv, temp_img, temp_hsv, landmarks[points], name) for name, points in zip(self.organs_name, self.organs_point)}
        # Forehead
        mask_nose = self.organs['nose'].get_mask_abs()
        mask_organs = (self.organs['mouth'].get_mask_abs()+mask_nose+self.organs['left_eye'].get_mask_abs()+self.organs['right_eye'].get_mask_abs()+self.organs['left_brow'].get_mask_abs()+self.organs['right_brow'].get_mask_abs())
        forehead_landmark = self.get_forehead_landmark(img, landmarks, mask_organs, mask_nose)
        self.organs['forehead'] = ForeHead(img, img_hsv, temp_img, temp_hsv, forehead_landmark, mask_organs, 'forehead')
        mask_organs += self.organs['forehead'].get_mask_abs()
        # Full set of landmarks for the whole face
        self.FACE_POINTS = np.concatenate([landmarks, forehead_landmark])
        super(Face, self).__init__(img, img_hsv, temp_img, temp_hsv, self.FACE_POINTS, 'face')
        mask_face = self.get_mask_abs() - mask_organs
        self.patch_mask = self.get_patch(mask_face)

    # Estimate the forehead landmarks
    def get_forehead_landmark(self, img, face_landmark, mask_organs, mask_nose):
        # Draw a half-disc above the line joining the two jaw endpoints
        radius = (np.linalg.norm(face_landmark[0]-face_landmark[16])/2).astype('int32')
        center_abs = tuple(((face_landmark[0]+face_landmark[16])/2).astype('int32'))
        angle = np.degrees(np.arctan((lambda l: l[1]/l[0])(face_landmark[16]-face_landmark[0]))).astype('int32')
        mask = np.zeros(mask_organs.shape[:2], dtype=np.float64)
        cv2.ellipse(mask, center_abs, (radius, radius), angle, 180, 360, 1, -1)
        # Remove any overlap with the other facial features
        mask[mask_organs[:, :, 0] > 0] = 0
        # Use the nose's skin tone to decide which pixels really belong to the forehead
        index_bool = []
        for ch in range(3):
            mean, std = np.mean(img[:, :, ch][mask_nose[:, :, ch] > 0]), np.std(img[:, :, ch][mask_nose[:, :, ch] > 0])
            up, down = mean+0.5*std, mean-0.5*std
            index_bool.append((img[:, :, ch] < down) | (img[:, :, ch] > up))
        index_zero = ((mask > 0) & index_bool[0] & index_bool[1] & index_bool[2])
        mask[index_zero] = 0
        index_abs = np.array(np.where(mask > 0)[::-1]).transpose()
        landmark = cv2.convexHull(index_abs).squeeze()
        return landmark
# Makeup engine
class Makeup():
    def __init__(self, predictor_path='./predictor/shape_predictor_68_face_landmarks.dat'):
        self.photo_path = []
        self.predictor_path = predictor_path
        self.faces = {}
        # Face detector and landmark predictor
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.predictor_path)

    # Face detection and landmark extraction
    # img is a numpy array
    # returns the (x, y) landmark coordinates of each detected face, wrapped in Face objects
    def get_faces(self, img, img_hsv, temp_img, temp_hsv, name, n=1):
        rects = self.detector(img, 1)
        if len(rects) < 1:
            print('[Warning]:No face detected...')
            return None
        return {name: [Face(img, img_hsv, temp_img, temp_hsv, np.array([[p.x, p.y] for p in self.predictor(img, rect).parts()]), i) for i, rect in enumerate(rects)]}

    # Read an image (fromfile + imdecode also handles non-ASCII paths on Windows)
    def read_img(self, fname, scale=1):
        img = cv2.imdecode(np.fromfile(fname, dtype=np.uint8), -1)
        if img is None:
            print('[ERROR]:Fail to Read %s' % fname)
            return None
        return img

    def read_and_mark(self, fname):
        img = self.read_img(fname)
        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        temp_img, temp_hsv = img.copy(), img_hsv.copy()
        return img, temp_img, self.get_faces(img, img_hsv, temp_img, temp_hsv, fname)
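With all the classes in place, a minimal driver script might look like the sketch below; the photo path and the rate values are example choices of mine, not something specified in the original post:

# Minimal usage sketch (hypothetical file name and parameters, for illustration only).
if __name__ == '__main__':
    mu = Makeup(predictor_path='./predictor/shape_predictor_68_face_landmarks.dat')
    fname = './photos/girl.jpg'            # example path, not from the original project
    img, temp_img, faces = mu.read_and_mark(fname)
    if faces:
        for face in faces[fname]:
            face.whitening(rate=0.15)      # brighten the skin
            face.smooth(rate=0.6)          # smooth the skin texture
            face.organs['forehead'].whitening(rate=0.2)
            face.sharpen(rate=0.2)
        cv2.imshow('after', img)           # img is modified in place by the effects
        cv2.waitKey(0)
        cv2.destroyAllWindows()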
The result looks like this:


Heh, doesn't the lady look a good deal fairer after the beauty filter!
Summary
That's all for this article! If you want the complete, packaged project source, see #私信小編06# to get it for free!
