Initial commit of project
This commit is contained in:
70
predictors/predict_age.py
Normal file
70
predictors/predict_age.py
Normal file
@@ -0,0 +1,70 @@
|
||||
import cv2
|
||||
import dlib
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.keras.models import Model
|
||||
from tensorflow.python.keras.applications import ResNet50
|
||||
from tensorflow.python.keras.layers import Dense
|
||||
|
||||
import config
|
||||
|
||||
|
||||
def get_age_model():
    """Build a ResNet50-based age classifier with a 101-way softmax head.

    The backbone is ImageNet-pretrained ResNet50 (no top, average pooling);
    the head emits one probability per integer age in [0, 100].
    """
    backbone = ResNet50(
        include_top=False,
        weights='imagenet',
        input_shape=(config.RESNET50_DEFAULT_IMG_WIDTH,
                     config.RESNET50_DEFAULT_IMG_WIDTH, 3),
        pooling='avg',
    )

    # 101 output units: one per age 0..100.
    head = Dense(
        units=101,
        kernel_initializer='he_normal',
        use_bias=False,
        activation='softmax',
        name='pred_age',
    )(backbone.output)

    return Model(inputs=backbone.input, outputs=head)
|
||||
|
||||
|
||||
def get_model():
    """Return the age classifier with its trained weights loaded."""
    classifier = get_age_model()
    classifier.load_weights(config.AGE_TRAINED_MODEL_PATH)
    print('Loaded weights from age classifier')
    return classifier
|
||||
|
||||
def get_trained_model():
    """Return the fully-initialized age model (delegates to get_model())."""
    return get_model()
|
||||
|
||||
# Load the trained age model once at import time so every call reuses it.
model = get_trained_model()
#detector = dlib.get_frontal_face_detector()

# TF1-style graph capture: predictions issued from other threads must run
# inside this graph (see graph.as_default() in predict_age below).
graph = tf.get_default_graph()
|
||||
|
||||
def predict_age(detected_faces, img):
    """Predict an age for each dlib-detected face in a BGR image.

    Parameters
    ----------
    detected_faces : iterable of dlib rectangles (must expose left/top/
        right/bottom/width/height accessors).
    img : BGR image array (as read by cv2).

    Returns
    -------
    1-D numpy array with one expected age per face (softmax expectation
    over ages 0..100).
    """
    input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = np.shape(input_img)

    # Pre-allocate one model-input slot per detected face.
    faces = np.empty((len(detected_faces), config.RESNET50_DEFAULT_IMG_WIDTH,
                      config.RESNET50_DEFAULT_IMG_WIDTH, 3))

    for i, d in enumerate(detected_faces):
        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, \
            d.bottom() + 1, d.width(), d.height()
        # Expand the crop by MARGIN on every side, clamped to the image.
        xw1 = max(int(x1 - config.MARGIN * w), 0)
        yw1 = max(int(y1 - config.MARGIN * h), 0)
        xw2 = min(int(x2 + config.MARGIN * w), img_w - 1)
        yw2 = min(int(y2 + config.MARGIN * h), img_h - 1)

        # NOTE(review): the crop is taken from the original BGR `img`, not the
        # RGB `input_img` computed above (which is only used for its shape) —
        # confirm the model was trained on BGR input.
        faces[i, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (
            config.RESNET50_DEFAULT_IMG_WIDTH,
            config.RESNET50_DEFAULT_IMG_WIDTH)) / 255.00

    # Run inference inside the graph captured at import time.
    with graph.as_default():
        results = model.predict(faces)
        # Expected age = sum_k k * P(age == k).
        ages = np.arange(0, 101).reshape(101, 1)
        predicted_ages = results.dot(ages).flatten()
        return predicted_ages
|
||||
128
predictors/predict_beauty.py
Normal file
128
predictors/predict_beauty.py
Normal file
@@ -0,0 +1,128 @@
|
||||
from keras.layers import Conv2D, Input, MaxPool2D,Flatten, Dense, Permute, GlobalAveragePooling2D
|
||||
from keras.models import Model
|
||||
from keras.optimizers import Adam
|
||||
import numpy as np
|
||||
import pickle
|
||||
import keras
|
||||
import cv2
|
||||
import sys
|
||||
import dlib
|
||||
import os.path
|
||||
from keras.models import Sequential
|
||||
from keras.applications.resnet50 import ResNet50
|
||||
#from keras.applications.resnet50 import Dense
|
||||
from keras.layers.core import Dense
|
||||
from keras.optimizers import Adam
|
||||
import pickle
|
||||
import numpy as np
|
||||
import cv2
|
||||
import os
|
||||
from keras.layers import Dropout
|
||||
import config
|
||||
|
||||
#APP_ROOT = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
#parent_path = os.path.dirname(APP_ROOT)
|
||||
#parent_path = os.path.dirname(parent_path)
|
||||
#model_path = parent_path+"/common/mmod_human_face_detector.dat"
|
||||
#cnn_face_detector = dlib.cnn_face_detection_model_v1(model_path)
|
||||
# dlib CNN (MMOD) face detector; used by predict_beauty below.
cnn_face_detector = dlib.cnn_face_detection_model_v1(config.CNN_FACE_DETECTOR_MODEL_PATH)

# Beauty scorer: frozen ImageNet ResNet50 trunk + 5-way softmax head.
# The five classes map to beauty scores 1..5 (see the weighted sum in
# predict_beauty).
resnet = ResNet50(include_top=False, pooling='avg')
beautyModel = Sequential()
beautyModel.add(resnet)
beautyModel.add(Dense(5, activation='softmax'))
beautyModel.layers[0].trainable = False

beautyModel.load_weights(config.BEAUTY_MODEL_WEIGHTS_PATH)
|
||||
|
||||
def score_mapping(modelScore):
    """Piecewise-linearly remap a raw model score (~1..5) to a 10-point scale.

    Each segment interpolates between hand-tuned anchors:
    1.0 -> 2.5, 1.9 -> 4, 2.8 -> 5.5, 3.4 -> 6.5, 4 -> 8, 5 -> 9.

    Bug fix: the original left `mappingScore` unassigned for
    modelScore >= 5, raising UnboundLocalError; such scores are now
    clamped to the top anchor (9).
    """
    if modelScore <= 1.9:
        mappingScore = ((4 - 2.5) / (1.9 - 1.0)) * (modelScore - 1.0) + 2.5
    elif modelScore <= 2.8:
        mappingScore = ((5.5 - 4) / (2.8 - 1.9)) * (modelScore - 1.9) + 4
    elif modelScore <= 3.4:
        mappingScore = ((6.5 - 5.5) / (3.4 - 2.8)) * (modelScore - 2.8) + 5.5
    elif modelScore <= 4:
        mappingScore = ((8 - 6.5) / (4 - 3.4)) * (modelScore - 3.4) + 6.5
    elif modelScore < 5:
        mappingScore = ((9 - 8) / (5 - 4)) * (modelScore - 4) + 8
    else:
        # modelScore >= 5: clamp instead of crashing.
        mappingScore = 9

    return mappingScore
|
||||
|
||||
def predict_beauty(img):
    """Score the attractiveness of the first face detected in `img`.

    Resizes the image, runs the dlib CNN detector, crops the first face,
    and returns its mapped beauty score. Implicitly returns None when no
    face is detected (the loop body never runs).
    """
    #im0 = cv2.imread(imgPath)
    im0 = img

    # Cap the long side at 1280, or double images smaller than 640,
    # preserving aspect ratio. new_shape is (height, width).
    if im0.shape[0] > 1280:
        new_shape = (1280, im0.shape[1] * 1280 / im0.shape[0])
    elif im0.shape[1] > 1280:
        new_shape = (im0.shape[0] * 1280 / im0.shape[1], 1280)
    elif im0.shape[0] < 640 or im0.shape[1] < 640:
        new_shape = (im0.shape[0] * 2, im0.shape[1] * 2)
    else:
        new_shape = im0.shape[0:2]

    # cv2.resize takes (width, height), hence the index swap.
    im = cv2.resize(im0, (int(new_shape[1]), int(new_shape[0])))
    dets = cnn_face_detector(im, 0)

    for i, d in enumerate(dets):
        # MMOD detections wrap the rectangle in .rect.
        face = [d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()]
        croped_im = im[face[1]:face[3], face[0]:face[2], :]
        resized_im = cv2.resize(croped_im, (224, 224))
        # Normalize pixels to [-1, 1] and add a batch dimension.
        normed_im = np.array([(resized_im - 127.5) / 127.5])

        pred = beautyModel.predict(normed_im)
        ldList = pred[0]
        # Expected score over the 5 softmax classes (scores 1..5).
        out = 1 * ldList[0] + 2 * ldList[1] + 3 * ldList[2] + 4 * ldList[3] + 5 * ldList[4]

        out = score_mapping(out)
        # Returns inside the loop: only the first detected face is scored.
        return out

    # print(img + " score: " + str('%.2f' % (out)))
    # cv2.rectangle(im, (face[0], face[1]), (face[2], face[3]), (0, 255, 0), 3)
    # cv2.putText(im, str('%.2f' % (out)), (face[0], face[3]), cv2.FONT_HERSHEY_SIMPLEX,
    #             1, (0, 0, 255), 2)
    #
    # ret = path + "/output-" + img
    # cv2.imwrite(ret, im)
    # return ret
|
||||
|
||||
def predict_beauty_from_faces(detected_faces, img):
    """Score every already-detected face in `img`; return the scores as a list."""
    source = img
    height, width = source.shape[0], source.shape[1]

    # Cap the long side at 1280, or double images smaller than 640,
    # preserving aspect ratio. `target` is (height, width).
    if height > 1280:
        target = (1280, width * 1280 / height)
    elif width > 1280:
        target = (height * 1280 / width, 1280)
    elif height < 640 or width < 640:
        target = (height * 2, width * 2)
    else:
        target = source.shape[0:2]

    # cv2.resize takes (width, height), hence the index swap.
    scaled = cv2.resize(source, (int(target[1]), int(target[0])))

    outList = []
    for rect in detected_faces:
        left, top, right, bottom = rect.left(), rect.top(), rect.right(), rect.bottom()
        crop = scaled[top:bottom, left:right, :]
        # Normalize to [-1, 1] and add a batch dimension.
        batch = np.array([(cv2.resize(crop, (224, 224)) - 127.5) / 127.5])

        probs = beautyModel.predict(batch)[0]
        # Expected score over the 5 softmax classes (scores 1..5).
        raw = 1 * probs[0] + 2 * probs[1] + 3 * probs[2] + 4 * probs[3] + 5 * probs[4]
        outList.append(score_mapping(raw))

    return outList

# beauty_predict(parent_path+"/samples/image",'fengjie.jpg')
# beauty_predict(parent_path+"/samples/image",'nenghua.jpg')
# beauty_predict(parent_path+"/samples/image",'shunli.jpg')
# beauty_predict(parent_path+"/samples/image",'test1.jpg')
# beauty_predict(parent_path+"/samples/image",'test2.jpg')
# beauty_predict(parent_path+"/samples/image",'test3.jpg')
# beauty_predict(parent_path+"/samples/image",'fty1845.jpg')
#beauty_predict(parent_path+"/samples/image",'fty1959.jpg')
# beauty_predict(parent_path+"/samples/image",'jiyou.png')
||||
78
predictors/predict_bmi.py
Normal file
78
predictors/predict_bmi.py
Normal file
@@ -0,0 +1,78 @@
|
||||
import cv2
|
||||
import dlib
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
from tensorflow.python.keras.models import Model
|
||||
from tensorflow.python.keras.applications import ResNet50
|
||||
from tensorflow.python.keras.layers import Dense
|
||||
|
||||
import config
|
||||
|
||||
|
||||
def get_age_model():
    """Construct the ResNet50 age classifier (101-way softmax over ages 0..100)."""
    width = config.RESNET50_DEFAULT_IMG_WIDTH
    trunk = ResNet50(include_top=False,
                     weights='imagenet',
                     input_shape=(width, width, 3),
                     pooling='avg')

    # One softmax unit per integer age in [0, 100].
    probs = Dense(units=101,
                  kernel_initializer='he_normal',
                  use_bias=False,
                  activation='softmax',
                  name='pred_age')(trunk.output)

    return Model(inputs=trunk.input, outputs=probs)
|
||||
|
||||
|
||||
def get_model(ignore_age_weights=False):
    """Build the BMI regressor: age-classifier trunk + one linear output unit.

    Parameters
    ----------
    ignore_age_weights : bool
        When False, initialize the trunk from the trained age classifier.
    """
    age_net = get_age_model()
    if not ignore_age_weights:
        age_net.load_weights(config.AGE_TRAINED_WEIGHTS_FILE)
        print('Loaded weights from age classifier')

    # Drop the softmax head: keep everything up to the penultimate layer.
    trunk = Model(inputs=age_net.input,
                  outputs=age_net.get_layer(index=-2).output)

    # Single linear unit for the BMI regression target.
    bmi_head = Dense(1, kernel_initializer='normal')(trunk.output)
    return Model(inputs=trunk.input, outputs=bmi_head)
|
||||
|
||||
def get_trained_model():
    """Return the BMI model with its trained regression weights loaded."""
    bmi_model = get_model(ignore_age_weights=True)
    bmi_model.load_weights(config.MODEL_WEIGHTS_PATH)
    return bmi_model
|
||||
|
||||
# Load the trained BMI model once at import time so every call reuses it.
model = get_trained_model()
#detector = dlib.get_frontal_face_detector()

# TF1-style graph capture: predictions issued from other threads must run
# inside this graph (see graph.as_default() in predict_bmi below).
graph = tf.get_default_graph()
|
||||
|
||||
def predict_bmi(detected_faces, img):
    """Predict a BMI value for each dlib-detected face in a BGR image.

    Parameters
    ----------
    detected_faces : iterable of dlib rectangles (must expose left/top/
        right/bottom/width/height accessors).
    img : BGR image array (as read by cv2).

    Returns
    -------
    Model output array with one regression value per face.
    """
    input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = np.shape(input_img)

    # Pre-allocate one model-input slot per detected face.
    faces = np.empty((len(detected_faces), config.RESNET50_DEFAULT_IMG_WIDTH,
                      config.RESNET50_DEFAULT_IMG_WIDTH, 3))

    for i, d in enumerate(detected_faces):
        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, \
            d.bottom() + 1, d.width(), d.height()
        # Expand the crop by MARGIN on every side, clamped to the image.
        xw1 = max(int(x1 - config.MARGIN * w), 0)
        yw1 = max(int(y1 - config.MARGIN * h), 0)
        xw2 = min(int(x2 + config.MARGIN * w), img_w - 1)
        yw2 = min(int(y2 + config.MARGIN * h), img_h - 1)

        # NOTE(review): the crop is taken from the BGR `img`, not the RGB
        # `input_img` (used only for its shape) — confirm the training
        # colorspace before changing this.
        faces[i, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (
            config.RESNET50_DEFAULT_IMG_WIDTH,
            config.RESNET50_DEFAULT_IMG_WIDTH)) / 255.00

    # Run inference inside the graph captured at import time.
    with graph.as_default():
        predictions = model.predict(faces)
        return predictions
|
||||
72
predictors/predict_gender.py
Normal file
72
predictors/predict_gender.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import os
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from keras.models import load_model
|
||||
from PIL import Image
|
||||
import config
|
||||
|
||||
# Load the gender CNN once at import time; compile=False because the model
# is used for inference only.
gender_classifier = load_model(config.GENDER_MODEL_PATH, compile=False)

# https://github.com/keras-team/keras/issues/6462
gender_classifier._make_predict_function()

# (height, width) the network expects for its input crops.
gender_target_size = gender_classifier.input_shape[1:3]
# Pixel padding added around the face box before cropping.
gender_offsets = (10, 10)
|
||||
|
||||
|
||||
def detect_genders(faceBoundary, image):
    """Classify the gender of the face inside `faceBoundary`.

    Returns a dict with 'woman'/'man' probabilities, or the empty string
    when the face crop cannot be resized.
    """
    result = ""
    box = get_boundary_box(faceBoundary, np.shape(image))
    x1, x2, y1, y2 = apply_offsets(box, gender_offsets)
    crop = image[y1:y2, x1:x2]

    try:
        crop = cv2.resize(crop, gender_target_size)
    except Exception as e:
        print('Error while resizing the image', e)
        return result

    # Normalize and add the batch dimension before inference.
    batch = np.expand_dims(pre_process_input(crop), 0)
    probs = gender_classifier.predict(batch)
    return {
        'woman': float(probs[0][0]),
        'man': float(probs[0][1])
    }
|
||||
|
||||
|
||||
def apply_offsets(face_coordinates, offsets):
    """Expand an (x, y, width, height) box by the given pixel offsets.

    Returns (x1, x2, y1, y2); the left and top edges are clamped at zero.
    """
    x, y, width, height = face_coordinates
    x_off, y_off = offsets
    x1 = max(x - x_off, 0)
    x2 = x + width + x_off
    y1 = max(y - y_off, 0)
    y2 = y + height + y_off
    return x1, x2, y1, y2
|
||||
|
||||
|
||||
def pre_process_input(x, v2=False):
    """Scale a pixel array to [0, 1], or to [-1, 1] when `v2` is True."""
    scaled = x.astype('float32') / 255.0
    if not v2:
        return scaled
    # Zero-center, then stretch to [-1, 1].
    return (scaled - 0.5) * 2.0
|
||||
|
||||
|
||||
def load_image(imageFilename):
    """Read an image file and return it as an RGB numpy array, or None on failure."""
    try:
        picture = Image.open(imageFilename)
        return np.array(picture.convert('RGB'))
    except Exception as e:
        print("Not a valid image found on {0}: {1}".format(imageFilename, e))
        return None
|
||||
|
||||
|
||||
def get_boundary_box(boundary, image_shape):
    """Convert a 4-element `boundary` into an [X, Y, width, height] box.

    X comes from boundary[3] and Y from boundary[0], each clamped at zero;
    width/height are capped by the first two entries of `image_shape`.
    """
    x_edge = max(boundary[3], 0)
    y_edge = max(boundary[0], 0)
    box_w = min(abs(x_edge - boundary[1]), image_shape[0])
    box_h = min(abs(y_edge - boundary[2]), image_shape[1])
    return [x_edge, y_edge, box_w, box_h]
|
||||
|
||||
Reference in New Issue
Block a user