import os

import cv2
import dlib
import numpy as np
import tensorflow as tf

import config
from predictors.predict_age import predict_age
from predictors.predict_bmi import predict_bmi
from predictors.predict_gender import detect_genders
# from predictors.predict_beauty import predict_beauty
# Shared dlib HOG frontal-face detector, built once at module load so
# every call into this module reuses the same instance.
detector = dlib.get_frontal_face_detector()
def extract_metadata(image_file_path):
    """Detect faces in an image and return per-face metadata.

    Each frontal face found by dlib is cropped (with a configurable
    margin) and written next to the input file as
    ``<name>-cropped-<i>.jpg``. Detections are then filtered to
    confident (front sub-detector), sufficiently large,
    female-classified faces, and BMI and age are predicted for each
    surviving face.

    Parameters
    ----------
    image_file_path : str
        Path to an image readable by ``cv2.imread``.

    Returns
    -------
    list[dict]
        One dict per filtered face with keys ``"face"`` (bounding box,
        pixel size, percentage of image area), ``"gender_woman"``
        (female-classification score), ``"bmi"`` and ``"age"`` (floats,
        or None when the respective predictor returned nothing).

    Raises
    ------
    ValueError
        If the image cannot be read from ``image_file_path``.
    """
    img = cv2.imread(image_file_path)
    if img is None:
        # cv2.imread returns None (no exception) for missing/corrupt
        # files; fail loudly here instead of crashing in cvtColor.
        raise ValueError("Could not read image: %s" % image_file_path)

    # dlib works on RGB; OpenCV loads BGR.
    input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = np.shape(input_img)

    # run() also yields per-detection confidence scores and the index of
    # the sub-detector that fired (0 == front-facing).
    detected, scores, idx = detector.run(input_img, 1)

    faces = np.empty((len(detected), config.RESNET50_DEFAULT_IMG_WIDTH,
                      config.RESNET50_DEFAULT_IMG_WIDTH, 3))
    filtered_faces = []

    # Base name for the cropped-face files. os.path.splitext (unlike the
    # former ''.join(path.split('.')[:-1])) keeps directory separators
    # and any extra dots in the path intact.
    base_path, _ = os.path.splitext(image_file_path)

    for i, d in enumerate(detected):
        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, \
            d.bottom() + 1, d.width(), d.height()
        face_size = (d.right() - d.left()) * (d.bottom() - d.top())
        face_size_percentage = (face_size / (img_w * img_h)) * 100

        # Expand the box by MARGIN on every side, clamped to the image.
        xw1 = max(int(x1 - config.MARGIN * w), 0)
        yw1 = max(int(y1 - config.MARGIN * h), 0)
        xw2 = min(int(x2 + config.MARGIN * w), img_w - 1)
        yw2 = min(int(y2 + config.MARGIN * h), img_h - 1)

        # Use the same (inclusive) crop for the file on disk and the
        # normalized model input; the original saved a crop one pixel
        # smaller than the one it resized.
        crop = img[yw1:yw2 + 1, xw1:xw2 + 1, :]
        cv2.imwrite(base_path + '-cropped-' + str(i) + '.jpg', crop)
        faces[i, :, :, :] = cv2.resize(crop, (
            config.RESNET50_DEFAULT_IMG_WIDTH,
            config.RESNET50_DEFAULT_IMG_WIDTH)) / 255.00

        face_boundary = (d.top(), d.right(), d.bottom(), d.left())
        gender = detect_genders(face_boundary, input_img)

        # FILTERING: keep female-classified faces that are large enough
        # in absolute pixels and came from the frontal sub-detector.
        # faceSizePercentage > config.MIN_FACE_SIZE_PERCENTAGE:
        female = gender["woman"] >= 0.5
        if female and face_size > config.MIN_FACE_SIZE_PIXELS and idx[i] == 0:
            filtered_faces.append(
                (d, face_size, face_size_percentage, gender["woman"]))

    results = []
    for d, face_size, face_size_percentage, woman_score in filtered_faces:
        # Predictors may return an empty sequence; report None then.
        # float(x) (not float(str(x))) avoids losing precision through
        # the rounded string repr of numpy scalars.
        bmi_predictions = predict_bmi([d], img)
        bmi_result = (float(bmi_predictions[0][0])
                      if len(bmi_predictions) > 0 else None)

        age_predictions = predict_age([d], img)
        age_result = (float(age_predictions[0])
                      if len(age_predictions) > 0 else None)

        # beauty_result = predict_beauty([d], img)
        results.append({
            "face": {
                "left": d.left(),
                "top": d.top(),
                "right": d.right(),
                "bottom": d.bottom(),
                "size": face_size,
                "sizePercentage": face_size_percentage
            },
            "gender_woman": woman_score,
            "bmi": bmi_result,
            "age": age_result
        })
    return results