Initial commit of project

This commit is contained in:
2021-02-16 00:20:14 -05:00
parent f818047117
commit 58de7cce9f
14 changed files with 588 additions and 0 deletions

37
config.py Normal file
View File

@@ -0,0 +1,37 @@
# Central configuration: environment flag, project folder layout, model
# weight file locations, and face-filtering thresholds.
import os

# Deployment environment switch; only "dev" enables the folder debug prints.
ENV = "dev"  # [dev/test/prod]

# Absolute path of the directory containing this config file.
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
print("PROJECT_DIR:\t", PROJECT_DIR)

# Standard project folders, all relative to the project root.
DATA_FOLDER = os.path.join(PROJECT_DIR, 'data')
MODELS_FOLDER = os.path.join(PROJECT_DIR, 'models')
LOGS_FOLDER = os.path.join(PROJECT_DIR, 'logs')
if ENV == "dev":
    print("DATA_FOLDER:\t", DATA_FOLDER)
    print("MODELS_FOLDER:\t", MODELS_FOLDER)
    print("LOGS_FOLDER:\t", LOGS_FOLDER)

# Pre-trained model weight files, expected under MODELS_FOLDER.
MODEL_WEIGHTS_PATH = os.path.join(MODELS_FOLDER, "bmi_model_weights.h5")
GENDER_MODEL_PATH = os.path.join(MODELS_FOLDER, "simple_CNN.81-0.96.hdf5")
AGE_TRAINED_MODEL_PATH = os.path.join(MODELS_FOLDER, "age_only_resnet50_weights.061-3.300-4.410.hdf5")
CNN_FACE_DETECTOR_MODEL_PATH = os.path.join(MODELS_FOLDER, "mmod_human_face_detector.dat")
BEAUTY_MODEL_WEIGHTS_PATH = os.path.join(MODELS_FOLDER, "beauty_model-ldl-resnet.h5")

# Input edge length (pixels) expected by the ResNet50-based models.
RESNET50_DEFAULT_IMG_WIDTH = 224
# Fractional margin added around detected face boxes before cropping.
MARGIN = .1
TRAIN_BATCH_SIZE = 16
VALIDATION_SIZE = 100
ORIGINAL_IMGS_DIR = 'images'
ORIGINAL_IMGS_INFO_FILE = 'data.csv'
#AGE_TRAINED_WEIGHTS_FILE = 'age_only_resnet50_weights.061-3.300-4.410.hdf5'
CROPPED_IMGS_DIR = 'normalized_images'
CROPPED_IMGS_INFO_FILE = 'normalized_data.csv'
TOP_LAYER_LOG_DIR = 'logs/top_layer'
ALL_LAYERS_LOG_DIR = 'logs/all_layers'
# The minimum percentage size of the targeted face to be considered for
# metadata extraction (currently unused; see MIN_FACE_SIZE_PIXELS).
MIN_FACE_SIZE_PERCENTAGE = 0.05
# Absolute pixel-area threshold (75x75) actually used for face filtering.
MIN_FACE_SIZE_PIXELS = (75 * 75)

77
imageProcessing.py Normal file
View File

@@ -0,0 +1,77 @@
import cv2
import dlib
import numpy as np
import tensorflow as tf
import config
from predictors.predict_gender import detect_genders
from predictors.predict_bmi import predict_bmi
from predictors.predict_age import predict_age
#from predictors.predict_beauty import predict_beauty
# dlib HOG-based frontal face detector, created once at import time.
detector = dlib.get_frontal_face_detector()
def extract_metadata(image_file_path):
    """Detect faces in the image at *image_file_path* and return metadata
    (box, size, gender score, BMI, age) for every face that passes filtering.

    Side effects: writes one '<name>-cropped-<i>.jpg' file next to the input
    image for each detected face.

    Returns:
        list of dicts: {"face": {...box/size...}, "gender_woman", "bmi", "age"}.
    """
    img = cv2.imread(image_file_path)
    # img = cv2.resize(img, (640, 480))
    input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = np.shape(input_img)
    # run() also returns per-detection confidence scores and sub-detector ids.
    detected, scores, idx = detector.run(input_img, 1)
    # NOTE(review): `faces` is filled below but never used afterwards — the
    # predictors re-crop from the image themselves. Candidate for removal.
    faces = np.empty((len(detected), config.RESNET50_DEFAULT_IMG_WIDTH,
                      config.RESNET50_DEFAULT_IMG_WIDTH, 3))
    filteredFaces = []
    for i, d in enumerate(detected):
        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, \
            d.bottom() + 1, d.width(), d.height()
        faceSize = (d.right() - d.left()) * (d.bottom() - d.top())
        faceSizePercentage = (faceSize / (img_w * img_h)) * 100
        # Widen the box by config.MARGIN per side, clamped to the image.
        xw1 = max(int(x1 - config.MARGIN * w), 0)
        yw1 = max(int(y1 - config.MARGIN * h), 0)
        xw2 = min(int(x2 + config.MARGIN * w), img_w - 1)
        yw2 = min(int(y2 + config.MARGIN * h), img_h - 1)
        # cv2.rectangle(img, (x1, y1), (x2, y2), (255, 0, 0), 2)
        # Persist the crop next to the original image for debugging/auditing.
        cv2.imwrite(''.join(image_file_path.split('.')[:-1]) + '-cropped-' + str(i) + '.jpg',
                    img[yw1:yw2, xw1:xw2])
        faces[i, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (
            config.RESNET50_DEFAULT_IMG_WIDTH,
            config.RESNET50_DEFAULT_IMG_WIDTH)) / 255.00
        # Boundary in (top, right, bottom, left) order, as detect_genders expects.
        face_boundary = (d.top(), d.right(), d.bottom(), d.left())
        # NOTE(review): detect_genders returns "" when its resize fails, and
        # indexing that with ["woman"] below would raise — confirm handling.
        gender = detect_genders(face_boundary, input_img)
        # FILTERING: keep only female faces, above the absolute pixel-area
        # threshold, found by dlib's primary (front-facing) sub-detector.
        female = gender["woman"] >= 0.5
        # faceSizePercentage > config.MIN_FACE_SIZE_PERCENTAGE:
        if female and faceSize > config.MIN_FACE_SIZE_PIXELS and idx[i] == 0:
            filteredFaces.append((d, faceSize, faceSizePercentage, gender["woman"]))
    results = []
    for i, d in enumerate(filteredFaces):
        # d = (rect, faceSize, faceSizePercentage, woman_probability)
        bmiResult = predict_bmi([d[0]], img)
        if len(bmiResult) > 0:
            bmiResult = float(str(bmiResult[0][0]))
        else:
            bmiResult = None
        ageResult = predict_age([d[0]], img)
        if len(ageResult) > 0:
            ageResult = float(str(ageResult[0]))
        else:
            ageResult = None
        # beautyResult = predict_beauty([d[0]], img)
        results.append({
            "face": {
                "left": d[0].left(),
                "top": d[0].top(),
                "right": d[0].right(),
                "bottom": d[0].bottom(),
                "size": d[1],
                "sizePercentage": d[2]
            },
            "gender_woman": d[3],
            "bmi": bmiResult,
            "age": ageResult
        })
    return results

67
main.py Normal file
View File

@@ -0,0 +1,67 @@
import os
from datetime import datetime
from flask import Flask, jsonify, request
import config
from werkzeug.utils import secure_filename
from imageProcessing import extract_metadata
# File extensions accepted for upload.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}
app = Flask("P! Vision Metadata Engine")
# Uploaded images are saved under the project's data folder.
app.config["UPLOAD_FOLDER"] = config.DATA_FOLDER
# Routes are namespaced by environment, e.g. /metadata/dev/...
router = '/metadata/' + config.ENV
def allowed_file(file_name):
    """Return True when *file_name* has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in file_name:
        return False
    extension = file_name.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route(router + '/hello', methods=['GET'])
def hello():
    """Health-check endpoint: confirms the service is reachable."""
    greeting = "Hello from P! Metadata Engine"
    return greeting
@app.route(router + '/calculate-image-metadata',
           methods=['GET', 'POST'])
def calculate_image_metadata():
    """Accept an uploaded image ('image_file' form field) and return its face
    metadata as JSON: {"request_id": ..., "metadata": [...]}.

    Validation failures and errors return a plain-text message with HTTP 200.
    NOTE(review): a POST with a disallowed extension falls through all
    branches and returns None (Flask error) — confirm intended behavior.
    """
    try:
        if request.method == 'POST':
            print()
            print("request.form:\t", request.form)
            print("request.files:\t", request.files)
            # Defaults to 100 when the client omits the request_id field.
            request_id = request.form.get("request_id", 100)
            print("request_id:\t", request_id)
            if "image_file" not in request.files:
                msg = "No file part: 'image_file' in POST request."
                print(msg)
                return msg
            file = request.files["image_file"]
            if file.filename == '':
                msg = "File name is empty."
                print(msg)
                return msg
            if file and allowed_file(file.filename):
                print("Uploaded file:\t", file.filename)
                # Timestamp (colons stripped for filesystem safety) plus the
                # request id keep saved names unique; secure_filename strips
                # any path components from the client-supplied name.
                image_file_name = str(datetime.now()).replace(':', '') + \
                    f'_{request_id}_' + secure_filename(file.filename)
                image_file_path = os.path.join(
                    app.config["UPLOAD_FOLDER"], image_file_name)
                file.save(image_file_path)
                print(f"Image file is saved at {image_file_path}")
                modelResponse =extract_metadata(image_file_path)
                print("Response :\t", modelResponse)
                return jsonify({"request_id": request_id, "metadata": modelResponse})
        else:
            msg = "POST the image file. Don't GET it."
            print(msg)
            return msg
    except Exception as e:
        # NOTE(review): exception type and message are echoed to the client —
        # this can leak internals; consider a generic message + server log.
        msg = str(type(e).__name__) + ': ' + str(e)
        print(msg)
        return msg
if __name__ == '__main__':
    # Development server: listens on all interfaces, port 4455.
    app.run(debug=False, host="0.0.0.0", port=4455)

Binary file not shown.

Binary file not shown.

BIN
models/bmi_model_weights.h5 Normal file

Binary file not shown.

Binary file not shown.

Binary file not shown.

70
predictors/predict_age.py Normal file
View File

@@ -0,0 +1,70 @@
import cv2
import dlib
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.layers import Dense
import config
def get_age_model():
    """Build the 101-class age classifier: an imagenet-initialized ResNet50
    backbone (average pooling, no top) with a softmax head named 'pred_age'."""
    backbone = ResNet50(
        include_top=False,
        weights='imagenet',
        input_shape=(config.RESNET50_DEFAULT_IMG_WIDTH,
                     config.RESNET50_DEFAULT_IMG_WIDTH, 3),
        pooling='avg'
    )
    head = Dense(units=101,
                 kernel_initializer='he_normal',
                 use_bias=False,
                 activation='softmax',
                 name='pred_age')(backbone.output)
    return Model(inputs=backbone.input, outputs=head)
def get_model():
    """Return the age classifier with its trained weights loaded."""
    age_classifier = get_age_model()
    age_classifier.load_weights(config.AGE_TRAINED_MODEL_PATH)
    print('Loaded weights from age classifier')
    return age_classifier
def get_trained_model():
    """Thin wrapper around get_model(), kept for naming parity with the
    other predictor modules."""
    return get_model()
# Age model is built and its weights loaded once, at import time.
model = get_trained_model()
#detector = dlib.get_frontal_face_detector()
# Capture the default graph so predict_age() can re-enter it later (TF1
# pattern; presumably needed for calls from other threads — confirm).
graph = tf.get_default_graph()
def predict_age(detected_faces, img):
    """Predict an age estimate for each dlib face rectangle in *img*.

    Args:
        detected_faces: iterable of dlib rectangles (left/top/right/bottom).
        img: image array in OpenCV BGR layout the faces were detected in.

    Returns:
        1-D numpy array with one expected age per face (expectation over
        the model's 101-class softmax output).
    """
    input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = np.shape(input_img)
    faces = np.empty((len(detected_faces), config.RESNET50_DEFAULT_IMG_WIDTH,
                      config.RESNET50_DEFAULT_IMG_WIDTH, 3))
    for i, d in enumerate(detected_faces):
        # Widen the box by config.MARGIN per side, clamped to the image.
        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, \
            d.bottom() + 1, d.width(), d.height()
        xw1 = max(int(x1 - config.MARGIN * w), 0)
        yw1 = max(int(y1 - config.MARGIN * h), 0)
        xw2 = min(int(x2 + config.MARGIN * w), img_w - 1)
        yw2 = min(int(y2 + config.MARGIN * h), img_h - 1)
        # NOTE(review): crops are taken from the BGR `img`, not the RGB
        # `input_img` computed above — confirm the model was trained on BGR.
        faces[i, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (
            config.RESNET50_DEFAULT_IMG_WIDTH,
            config.RESNET50_DEFAULT_IMG_WIDTH)) / 255.00
    # Run inside the graph captured at import time (TF1 threading pattern).
    with graph.as_default():
        results = model.predict(faces)
    # Fold the 101-class distribution into its expected value per face.
    ages = np.arange(0, 101).reshape(101, 1)
    predicted_ages = results.dot(ages).flatten()
    return predicted_ages

View File

@@ -0,0 +1,128 @@
from keras.layers import Conv2D, Input, MaxPool2D,Flatten, Dense, Permute, GlobalAveragePooling2D
from keras.models import Model
from keras.optimizers import Adam
import numpy as np
import pickle
import keras
import cv2
import sys
import dlib
import os.path
from keras.models import Sequential
from keras.applications.resnet50 import ResNet50
#from keras.applications.resnet50 import Dense
from keras.layers.core import Dense
from keras.optimizers import Adam
import pickle
import numpy as np
import cv2
import os
from keras.layers import Dropout
import config
#APP_ROOT = os.path.dirname(os.path.abspath(__file__))
#parent_path = os.path.dirname(APP_ROOT)
#parent_path = os.path.dirname(parent_path)
#model_path = parent_path+"/common/mmod_human_face_detector.dat"
#cnn_face_detector = dlib.cnn_face_detection_model_v1(model_path)
# dlib CNN (MMOD) face detector, loaded once at import time.
cnn_face_detector = dlib.cnn_face_detection_model_v1(config.CNN_FACE_DETECTOR_MODEL_PATH)
# Beauty scorer: frozen ResNet50 feature extractor feeding a 5-way softmax
# head; trained weights are loaded from disk at import time.
resnet = ResNet50(include_top=False, pooling='avg')
beautyModel = Sequential()
beautyModel.add(resnet)
beautyModel.add(Dense(5, activation='softmax'))
beautyModel.layers[0].trainable = False
beautyModel.load_weights(config.BEAUTY_MODEL_WEIGHTS_PATH)
def score_mapping(modelScore):
    """Piecewise-linearly remap a raw beauty score (~1-5 model scale) onto a
    ~2.5-9 presentation scale.

    Segment endpoints: 1.0->2.5, 1.9->4, 2.8->5.5, 3.4->6.5, 4->8, 5->9.

    Fix: the original had no branch for modelScore >= 5, leaving
    ``mappingScore`` unbound and raising UnboundLocalError; such scores now
    clamp to the top of the scale (9.0). Behavior below 5 is unchanged.
    """
    if modelScore <= 1.9:
        mappingScore = ((4 - 2.5) / (1.9 - 1.0)) * (modelScore-1.0) + 2.5
    elif modelScore <= 2.8:
        mappingScore = ((5.5 - 4) / (2.8 - 1.9)) * (modelScore-1.9) + 4
    elif modelScore <= 3.4:
        mappingScore = ((6.5 - 5.5) / (3.4 - 2.8)) * (modelScore-2.8) + 5.5
    elif modelScore <= 4:
        mappingScore = ((8 - 6.5) / (4 - 3.4)) * (modelScore-3.4) + 6.5
    elif modelScore < 5:
        mappingScore = ((9 - 8) / (5 - 4)) * (modelScore-4) + 8
    else:
        # Top of the scale: clamp instead of crashing on scores >= 5.
        mappingScore = 9.0
    return mappingScore
def predict_beauty(img):
    """Score attractiveness for the FIRST CNN-detected face in *img*.

    Returns the mapped score (see score_mapping) for the first detected
    face — the ``return`` sits inside the detection loop — or None (implicit)
    when no face is detected.
    """
    #im0 = cv2.imread(imgPath)
    im0 = img
    # Rescale so the longest side is at most 1280 px; images with either
    # side under 640 px are doubled instead.
    if im0.shape[0] > 1280:
        new_shape = (1280, im0.shape[1] * 1280 / im0.shape[0])
    elif im0.shape[1] > 1280:
        new_shape = (im0.shape[0] * 1280 / im0.shape[1], 1280)
    elif im0.shape[0] < 640 or im0.shape[1] < 640:
        new_shape = (im0.shape[0] * 2, im0.shape[1] * 2)
    else:
        new_shape = im0.shape[0:2]
    im = cv2.resize(im0, (int(new_shape[1]), int(new_shape[0])))
    dets = cnn_face_detector(im, 0)
    for i, d in enumerate(dets):
        face = [d.rect.left(), d.rect.top(), d.rect.right(), d.rect.bottom()]
        croped_im = im[face[1]:face[3], face[0]:face[2], :]
        resized_im = cv2.resize(croped_im, (224, 224))
        # Normalize pixel values from [0, 255] to [-1, 1].
        normed_im = np.array([(resized_im - 127.5) / 127.5])
        pred = beautyModel.predict(normed_im)
        ldList = pred[0]
        # Expected value of the 1..5 label distribution.
        out = 1 * ldList[0] + 2 * ldList[1] + 3 * ldList[2] + 4 * ldList[3] + 5 * ldList[4]
        out = score_mapping(out)
        return out
    # (commented-out debug/visualization code)
    # print(img + " score:" + str('%.2f' % (out)))
    # cv2.rectangle(im, (face[0], face[1]), (face[2], face[3]), (0, 255, 0), 3)
    # cv2.putText(im, str('%.2f' % (out)), (face[0], face[3]), cv2.FONT_HERSHEY_SIMPLEX,
    #             1, (0, 0, 255), 2)
    #
    # ret = path + "/output-" + img
    # cv2.imwrite(ret, im)
    # return ret
def predict_beauty_from_faces(detected_faces, img):
    """Score attractiveness for each pre-detected dlib rectangle in *img*.

    Unlike predict_beauty(), this takes plain dlib rectangles (d.left(),
    not d.rect.left()) and returns one mapped score per face.

    NOTE(review): the rectangles are used unscaled against the RESIZED
    image `im`, so box coordinates from the original image may not line up
    after resizing — confirm callers pass boxes in resized coordinates.
    """
    outList = []
    im0 = img
    # Same rescale policy as predict_beauty(): cap longest side at 1280 px,
    # double images with either side under 640 px.
    if im0.shape[0] > 1280:
        new_shape = (1280, im0.shape[1] * 1280 / im0.shape[0])
    elif im0.shape[1] > 1280:
        new_shape = (im0.shape[0] * 1280 / im0.shape[1], 1280)
    elif im0.shape[0] < 640 or im0.shape[1] < 640:
        new_shape = (im0.shape[0] * 2, im0.shape[1] * 2)
    else:
        new_shape = im0.shape[0:2]
    im = cv2.resize(im0, (int(new_shape[1]), int(new_shape[0])))
    for i, d in enumerate(detected_faces):
        face = [d.left(), d.top(), d.right(), d.bottom()]
        croped_im = im[face[1]:face[3], face[0]:face[2], :]
        resized_im = cv2.resize(croped_im, (224, 224))
        # Normalize pixel values from [0, 255] to [-1, 1].
        normed_im = np.array([(resized_im - 127.5) / 127.5])
        pred = beautyModel.predict(normed_im)
        ldList = pred[0]
        # Expected value of the 1..5 label distribution.
        out = 1 * ldList[0] + 2 * ldList[1] + 3 * ldList[2] + 4 * ldList[3] + 5 * ldList[4]
        out = score_mapping(out)
        outList.append(out)
    return outList
# beauty_predict(parent_path+"/samples/image",'fengjie.jpg')
# beauty_predict(parent_path+"/samples/image",'nenghua.jpg')
# beauty_predict(parent_path+"/samples/image",'shunli.jpg')
# beauty_predict(parent_path+"/samples/image",'test1.jpg')
# beauty_predict(parent_path+"/samples/image",'test2.jpg')
# beauty_predict(parent_path+"/samples/image",'test3.jpg')
# beauty_predict(parent_path+"/samples/image",'fty1845.jpg')
#beauty_predict(parent_path+"/samples/image",'fty1959.jpg')
# beauty_predict(parent_path+"/samples/image",'jiyou.png')

78
predictors/predict_bmi.py Normal file
View File

@@ -0,0 +1,78 @@
import cv2
import dlib
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.models import Model
from tensorflow.python.keras.applications import ResNet50
from tensorflow.python.keras.layers import Dense
import config
def get_age_model():
    """Build the 101-class age classifier used as the BMI model's backbone:
    imagenet-initialized ResNet50 (average pooling, no top) plus a softmax
    head named 'pred_age'."""
    backbone = ResNet50(
        include_top=False,
        weights='imagenet',
        input_shape=(config.RESNET50_DEFAULT_IMG_WIDTH,
                     config.RESNET50_DEFAULT_IMG_WIDTH, 3),
        pooling='avg'
    )
    head = Dense(units=101,
                 kernel_initializer='he_normal',
                 use_bias=False,
                 activation='softmax',
                 name='pred_age')(backbone.output)
    return Model(inputs=backbone.input, outputs=head)
def get_model(ignore_age_weights=False):
    """Build the BMI regressor from the age-classifier backbone.

    The age model's 101-way softmax head is dropped and replaced with a
    single linear unit for BMI regression.

    Args:
        ignore_age_weights: when False, initialize the backbone with the
            trained age-classifier weights before swapping the head.

    Fix: the original read ``config.AGE_TRAINED_WEIGHTS_FILE``, which is
    commented out in config.py and would raise AttributeError when
    ignore_age_weights=False; use ``config.AGE_TRAINED_MODEL_PATH``, the
    same path predictors/predict_age.py loads.
    """
    base_model = get_age_model()
    if not ignore_age_weights:
        base_model.load_weights(config.AGE_TRAINED_MODEL_PATH)
        print('Loaded weights from age classifier')
    # Re-root the model at the last hidden layer (drop the softmax head).
    last_hidden_layer = base_model.get_layer(index=-2)
    base_model = Model(
        inputs=base_model.input,
        outputs=last_hidden_layer.output)
    # Single linear output unit for BMI regression.
    prediction = Dense(1, kernel_initializer='normal')(base_model.output)
    model = Model(inputs=base_model.input, outputs=prediction)
    return model
def get_trained_model():
    """Return the BMI model with its trained regression weights loaded."""
    bmi_model = get_model(ignore_age_weights=True)
    bmi_model.load_weights(config.MODEL_WEIGHTS_PATH)
    return bmi_model
# BMI model is built and its weights loaded once, at import time.
model = get_trained_model()
#detector = dlib.get_frontal_face_detector()
# Capture the default graph so predict_bmi() can re-enter it later (TF1
# pattern; presumably needed for calls from other threads — confirm).
graph = tf.get_default_graph()
def predict_bmi(detected_faces, img):
    """Predict BMI for each dlib face rectangle in *img*.

    Args:
        detected_faces: iterable of dlib rectangles (left/top/right/bottom).
        img: image array in OpenCV BGR layout the faces were detected in.

    Returns:
        Model output array with one regression value per face.
    """
    input_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img_h, img_w, _ = np.shape(input_img)
    faces = np.empty((len(detected_faces), config.RESNET50_DEFAULT_IMG_WIDTH,
                      config.RESNET50_DEFAULT_IMG_WIDTH, 3))
    for i, d in enumerate(detected_faces):
        # Widen the box by config.MARGIN per side, clamped to the image.
        x1, y1, x2, y2, w, h = d.left(), d.top(), d.right() + 1, \
            d.bottom() + 1, d.width(), d.height()
        xw1 = max(int(x1 - config.MARGIN * w), 0)
        yw1 = max(int(y1 - config.MARGIN * h), 0)
        xw2 = min(int(x2 + config.MARGIN * w), img_w - 1)
        yw2 = min(int(y2 + config.MARGIN * h), img_h - 1)
        # NOTE(review): crops are taken from the BGR `img`, not the RGB
        # `input_img` computed above — confirm the model was trained on BGR.
        faces[i, :, :, :] = cv2.resize(img[yw1:yw2 + 1, xw1:xw2 + 1, :], (
            config.RESNET50_DEFAULT_IMG_WIDTH,
            config.RESNET50_DEFAULT_IMG_WIDTH)) / 255.00
    # Run inside the graph captured at import time (TF1 threading pattern).
    with graph.as_default():
        predictions = model.predict(faces)
    return predictions

View File

@@ -0,0 +1,72 @@
import os
import cv2
import numpy as np
from keras.models import load_model
from PIL import Image
import config
# Gender classifier loaded once at import time; compile=False because the
# model is used for inference only.
gender_classifier = load_model(config.GENDER_MODEL_PATH, compile=False)
# https://github.com/keras-team/keras/issues/6462
# Pre-build the predict function up front (presumably to avoid cross-thread
# graph issues when Flask handlers call predict — see linked issue).
gender_classifier._make_predict_function()
# Spatial size (rows, cols) the classifier expects its input crops to be.
gender_target_size = gender_classifier.input_shape[1:3]
# Extra pixels added around the face box before cropping.
gender_offsets = (10, 10)
def detect_genders(faceBoundary, image):
    """Classify the gender of one face crop.

    Args:
        faceBoundary: (top, right, bottom, left) tuple, as built by
            imageProcessing.extract_metadata.
        image: RGB image array containing the face.

    Returns:
        {'woman': p, 'man': p} on success, or the empty string "" when the
        crop cannot be resized. NOTE(review): callers index the result with
        ["woman"], which fails on the "" error path — confirm handling.
    """
    result = ""
    face = get_boundary_box(faceBoundary, np.shape(image))
    x1, x2, y1, y2 = apply_offsets(face, gender_offsets)
    rgb_face = image[y1:y2, x1:x2]
    try:
        rgb_face = cv2.resize(rgb_face, gender_target_size)
    except Exception as e:
        print('Error while resizing the image', e)
        return result
    rgb_face = pre_process_input(rgb_face)
    rgb_face = np.expand_dims(rgb_face, 0)
    gender_prediction = gender_classifier.predict(rgb_face)
    # First output row holds the two class probabilities (woman, man).
    result = {
        'woman': float(gender_prediction[0][0]),
        'man': float(gender_prediction[0][1])
    }
    return result
def apply_offsets(face_coordinates, offsets):
    """Expand an (x, y, width, height) box by (x_off, y_off) on each side.

    Returns (x1, x2, y1, y2); the left/top edges are clamped at zero, the
    right/bottom edges are not clamped against the image size.
    """
    x, y, width, height = face_coordinates
    x_off, y_off = offsets
    x1 = max(x - x_off, 0)
    x2 = x + width + x_off
    y1 = max(y - y_off, 0)
    y2 = y + height + y_off
    return x1, x2, y1, y2
def pre_process_input(x, v2=False):
    """Scale pixel values into [0, 1] as float32; with v2=True, shift and
    scale them into [-1, 1] instead."""
    x = x.astype('float32') / 255.0
    if v2:
        x = (x - 0.5) * 2.0
    return x
def load_image(imageFilename):
    """Open an image file and return it as an RGB numpy array, or None when
    the file cannot be read as an image (the error is printed)."""
    try:
        return np.array(Image.open(imageFilename).convert('RGB'))
    except Exception as e:
        print("Not a valid image found on {0}: {1}".format(imageFilename, e))
        return None
def get_boundary_box(boundary, image_shape):
    """Convert a (top, right, bottom, left) boundary into [x, y, width, height].

    NOTE(review): width is capped by image_shape[0] (rows) and height by
    image_shape[1] (cols) — the axes look swapped; confirm against callers.
    """
    top, right, bottom, left = boundary[0], boundary[1], boundary[2], boundary[3]
    x = max(left, 0)
    y = max(top, 0)
    width = min(abs(x - right), image_shape[0])
    height = min(abs(y - bottom), image_shape[1])
    return [x, y, width, height]

38
requirements.txt Normal file
View File

@@ -0,0 +1,38 @@
absl-py==0.7.0
astor==0.7.1
Augmentor==0.2.3
backports.functools-lru-cache==1.5
backports.weakref==1.0.post1
cycler==0.10.0
dlib==19.16.0
enum34==1.1.6
funcsigs==1.0.2
future==0.17.1
gast==0.2.2
grpcio==1.18.0
h5py==2.9.0
Keras==2.2.4
Keras-Applications==1.0.7
Keras-Preprocessing==1.0.9
kiwisolver==1.0.1
Markdown==3.0.1
matplotlib==2.2.3
mock==2.0.0
numpy==1.16.1
opencv-python==4.0.0.21
pandas==0.24.1
pbr==5.1.2
Pillow==5.4.1
protobuf==3.6.1
pyparsing==2.3.1
python-dateutil==2.8.0
pytz==2018.9
PyYAML==3.13
scipy==1.2.0
six==1.12.0
subprocess32==3.5.3
tensorboard==1.12.2
tensorflow==1.12.0
termcolor==1.1.0
tqdm==4.30.0
Werkzeug==0.14.1

21
test.html Normal file
View File

@@ -0,0 +1,21 @@
<!DOCTYPE html>
<!-- Manual test page: posts a request id and an image file to the
     /metadata/dev/calculate-image-metadata endpoint served by main.py. -->
<html lang="en">
<head>
    <meta charset="UTF-8">
    <title>Metadata Calculator</title>
</head>
<body>
    <form action = "http://localhost:4455/metadata/dev/calculate-image-metadata" method = "POST"
          enctype = "multipart/form-data">
        <label>Request ID</label>
        <input type = "text" id = "request_id" name = "request_id"/>
        <br>
        <br>
        <label>Image File</label>
        <!-- Field name must be "image_file"; the server rejects other names. -->
        <input type = "file" name = "image_file" />
        <br>
        <br>
        <input type = "submit"/>
    </form>
</body>
</html>