Introduction

Objective

The first and simplest use case for the facial profiling technology we developed is the ability to submit a collection of portraits and receive back the facial profiles along with the labeled input images. The idea is to make it easy for social science researchers to control for facial profile. For example, when designing subject cohorts, an experimental economics or psychology researcher may want to pair up subjects who have maximally different facial skin tones or facial width-to-height ratios. This service would let them either build that assignment directly into their computerized experiment, or at least eliminate the manual work currently needed to obtain those measurements.
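As a rough sketch of that workflow from the researcher's side, assuming the simple deployment created later in this notebook and the ubiops client's deployment_requests_create call (the payload shape is illustrative, not the final portrait-upload API):

import ubiops

configuration = ubiops.Configuration()
configuration.api_key['Authorization'] = 'Token <YOUR_API_TOKEN>'

client = ubiops.ApiClient(configuration)
api = ubiops.CoreApi(client)

# Hypothetical request against the tutorial deployment defined below;
# a production facial-profile service would accept portrait files instead.
request = api.deployment_requests_create(
    project_name='facial-profile',
    deployment_name='simple-tutorial',
    data={'input': 1.0},
)
print(request.result)
client.close()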

Installation of UbiOps

import sys
!{sys.executable} -m pip install ubiops
Requirement already satisfied: ubiops in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (3.1.0)
Requirement already satisfied: certifi in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (2020.6.20)
Requirement already satisfied: six>=1.10 in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (1.15.0)
Requirement already satisfied: python-dateutil in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (2.8.1)
Requirement already satisfied: urllib3>=1.15 in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (1.25.11)

### Create a service user
import os
import json
from pathlib import Path
with open(Path(os.getenv('HOME'))/'.ubiops/ubiops.json') as f:
    data = json.load(f)

import ubiops

configuration = ubiops.Configuration()
configuration.api_key['Authorization'] = data['API_TOKEN']

client = ubiops.ApiClient(configuration)
api = ubiops.api.CoreApi(client)

print(api.service_status())
client.close()
{'status': 'ok'}

API_TOKEN = data['API_TOKEN']
PROJECT_NAME = 'facial-profile'
DEPLOYMENT_NAME = 'simple-tutorial'
DEPLOYMENT_VERSION = 'v1'

configuration = ubiops.Configuration()
configuration.api_key['Authorization'] = API_TOKEN

client = ubiops.ApiClient(configuration)
api = ubiops.CoreApi(client)
api.service_status()
{'status': 'ok'}
# deployment_template = ubiops.DeploymentCreate(
#     name=DEPLOYMENT_NAME,
#     description='A simple deployment that multiplies the input float by a random number.',
#     input_type='structured',
#     output_type='structured',
#     input_fields=[ubiops.DeploymentInputFieldCreate(name='input', data_type='double')],
#     output_fields=[ubiops.DeploymentOutputFieldCreate(name='output', data_type='double')]
# )
# 
# deployment = api.deployments_create(project_name=PROJECT_NAME, data=deployment_template)
# print(deployment)
# version_template = ubiops.VersionCreate(
#     version=DEPLOYMENT_VERSION,
#     language='python3.7',
#     memory_allocation=256,
#     maximum_instances=1,
#     minimum_instances=0,
#     maximum_idle_time=1800 # = 30 minutes
# )
# 
# version = api.versions_create(
#     project_name=PROJECT_NAME,
#     deployment_name=DEPLOYMENT_NAME,
#     data=version_template
# )
# print(version)

Code

%matplotlib inline

from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
import numpy as np
import skimage
from skimage import color

from prcvd.img import (
    compute_theta, apply_rgb2lab, calc_euclidean_error, MaskedImg, 
    LabelMask, FaceMask,  MeshMask, TrainedSegmentationModel
)

from prcvd.core import IndexerDict

## shared with training
def get_y_fn(fp):
    """Maps an image path to its integer-label mask path.
    Relies on img_to_l, a path-to-label mapping defined in the training notebook."""
    l_str = str(img_to_l[fp])
    return l_str.replace('labels', 'labels_int').replace('png', 'tif')


    
class FacialProfile:
    def __init__(self, img, model, sampling_strategy='use_all', align_face=True):
        self.segmask = None
        self.eye_slope_threshold = 0.50  # degrees
        self.model = model
        self.img = img
        self.eye_slope = np.nan
        self.apply_rotation(angle=0.0)
        self.fix_rotation()
        self.mesh = MeshMask(img=self.segmask.decoded_img)
        self.skin_tones = self.compute_skintones(
            sampling_strategy=sampling_strategy
        )
        self.tot_head_area, self.face_areas = self.create_area_features()

        self.bizygomatic_left, self.bizygomatic_right, self.bizygomatic_dist = \
            self.compute_bizygomatic_width()
    
        self.upperfacial_top, self.upperfacial_bottom, self.upperfacial_dist = \
            self.compute_upperfacial_height()
        
        self.fwhr = self.bizygomatic_dist / self.upperfacial_dist
        # self.modeled_img_fp = write_to/'imgs'/(fp.stem+'.jpg') #TODO
    
    def get_profile(self):
        """Returns the face profile object."""
        tones = {'rgb_of_{}'.format(k):v for k,v in self.skin_tones.items()}
        other = {
            'img_rot_degrees': self.img.rotation_degrees,
            'img_num_rotations': self.img.num_rotations,
            'img_eye_slope': self.eye_slope,
            'fwhr': self.fwhr,
            'bizygomatic_w_px': self.bizygomatic_dist,
            'upperfacial_h_px': self.upperfacial_dist,
            'tot_head_area_px': self.tot_head_area
        }
        out = {**tones, **other}
        out = {**out, **self.face_areas}
        return out
    
    
    def apply_rotation(self, angle):
        """Applies a rotation by angle (in degrees) to self.img"""
        if angle != 0.0:
            self.img.rotate(angle)
            
        self.segmask = FaceMask(img=self.img, model=self.model)
        self.create_eye_features()
        # TODO: add check to see if the face rotation worked out
        self.create_secondary_features()
    
    
    def fix_rotation(self):
        """Loops over rotations until the image is corrected."""
        while abs(self.theta_degrees) > self.eye_slope_threshold:
            self.apply_rotation(angle=self.theta_degrees)
        
    
    def create_area_features(self):
        nots = ['Background/undefined']
        total_area = {
            lab: np.count_nonzero(self.segmask.mask == code) 
            for lab, code in self.segmask.label_to_code.items()
            if lab not in nots
        }
        s = sum(total_area.values())
        total_area = {'pct_of_head_{}'.format(k): v/s for k,v in total_area.items()}
        return s, total_area
    
    
    def create_eye_features(self):
        """Writes the eye features."""
        self.segmask.add_eyes()
        self.eye_slope = self.segmask.compute_eye_slope()
        if self.eye_slope > 0.0:
            self.theta_degrees = \
                -1.0*compute_theta(slope1=self.eye_slope, slope2=0.0)
        else:
            self.theta_degrees = \
                compute_theta(slope2=self.eye_slope, slope1=0.0)
        
        
    def create_secondary_features(self):
        """Writes the secondary facial features, dependent on reference features."""
        self.segmask.add_cheeks()
        self.segmask.add_forehead()

    
    def compute_skintones(self, sampling_strategy, thresh=None):
        """Computes skin tones for all regions"""
        if thresh:
            self.segmask.calc_new_decision(thresh=thresh)
        
        return {
            region: self.segmask.calc_region_color(
                region=region, 
                sampling_strategy=sampling_strategy
            )
            for region in self.segmask.label_to_code.keys()
        }
    
    
    def compute_bizygomatic_width(self):
        """Computes bizygomatic width: the horizontal pixel distance between
        the outermost points of the right and left cheek/eye landmarks."""
        # Sources: 
        # https://carta.anthropogeny.org/moca/topics/upper-facial-height

        right_eye_right = self.segmask.find_region_extrema(
            region='right_eye',direction='right',
        )
        right_cheek_right = self.segmask.find_region_extrema(
            region='right_cheek',direction='right',
        )
        bizygomatic_right = (float(right_cheek_right[0]), float(right_eye_right[1]))

        left_eye_left = self.segmask.find_region_extrema(
            region='left_eye',direction='left',
        )
        left_cheek_left = self.segmask.find_region_extrema(
            region='left_cheek',direction='left',
        )
        bizygomatic_left = (float(left_cheek_left[0]), float(left_eye_left[1]))
        bizygomatic_dist = bizygomatic_right[0] - bizygomatic_left[0]

        return bizygomatic_left, bizygomatic_right, bizygomatic_dist
        
        
    def compute_upperfacial_height(self):
        """Computes upper facial height: the vertical pixel distance between
        the bottom of the eyebrows and the top of the lips."""
        upperfacial_top = self.segmask.find_region_extrema(
            region='Eyebrows',direction='bottom',
        )
        upperfacial_bottom = self.segmask.find_region_extrema(
            region='Lips',direction='top',
        )
        upperfacial_top = (float(upperfacial_top[0]), float(upperfacial_top[1]))
        upperfacial_bottom = (float(upperfacial_bottom[0]), float(upperfacial_bottom[1]))
        upperfacial_dist = upperfacial_bottom[1] - upperfacial_top[1]
        
        return upperfacial_top, upperfacial_bottom, upperfacial_dist

        
    def get_skintones_in_lab(self):
        """
        Returns skin tones in LAB colors
        """
        return {
            k: apply_rgb2lab(v) 
            for k, v in self.skin_tones.items()
        }
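Aside: fix_rotation above re-estimates the eye slope and rotates until the implied angle falls below the 0.5-degree threshold. compute_theta is imported from prcvd.img; a plausible minimal stand-in, assuming it returns the angle in degrees between two lines given by their slopes (the actual prcvd implementation may differ):

import numpy as np

def compute_theta_sketch(slope1, slope2):
    """Angle in degrees between two lines given by their slopes.
    Hypothetical stand-in for prcvd.img.compute_theta."""
    return float(np.degrees(np.arctan(slope1) - np.arctan(slope2)))

# With slope2=0.0 (a level line) this reduces to arctan(eye_slope) in degrees,
# which fix_rotation drives below the eye_slope_threshold of 0.5 degrees.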

path = Path("/ws/data/skin-tone/headsegmentation_dataset_ccncsa")
mod_dir = path/'Models'
mod_fp = mod_dir/'checkpoint_20201007'
# TODO: figure out how to get the labels from the trained model...
output_classes = ['Background/undefined', 'Lips', 'Eyes', 'Nose', 'Hair', 
  'Ears', 'Eyebrows', 'Teeth', 'General face', 'Facial hair',
  'Specs/sunglasses']
size = 224

truth_fp = Path('/ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/MasterFacesDatabaseLabels.xlsx - labels.csv') 
truth = pd.read_csv(truth_fp)
truth.index = truth['PostStatementPhotoFilename']
del truth['PostStatementPhotoFilename']
truth['L_m'] = (truth['L_l'] + truth['L_r']) / 2
truth['a_m'] = (truth['a_l'] + truth['a_r']) / 2
truth['b_m'] = (truth['b_l'] + truth['b_r']) / 2

def run_summary(fp, summary, attempt=1, num_attempts=10):
    print(fp)
    outimg = write_to/'imgs'/(fp.stem+'.jpg')

    if outimg.exists():
        return None, None

    img = MaskedImg()
    img.load_from_file(fn=fp)

    model = TrainedSegmentationModel(
        mod_fp=mod_fp,
        input_size=size,
        output_classes=output_classes
    )

    try:
        profile = FacialProfile(
            model=model,
            img=img,
            sampling_strategy='use_all',
            align_face=True
        )
    except Exception:
        if attempt <= num_attempts:
            return run_summary(
                fp=fp,
                summary=summary,
                attempt=attempt+1,
                num_attempts=num_attempts
            )
        else:
            return None, None
    
    plt.figure(figsize=(10,10))
    plt.imshow(profile.segmask.decoded_img.img)
    plt.imshow(
        skimage.color.label2rgb(np.array(profile.segmask.mask)), 
        alpha=0.3
    )
    plt.title('Computed fWHR based on Segmentation Only (not FaceMesh).\nfWHR: {}'.format(profile.fwhr))

    plt.scatter(x=[profile.bizygomatic_right[0]], 
                y=[profile.bizygomatic_right[1]], 
                marker='+', c='orange')
    plt.scatter(x=[profile.bizygomatic_left[0]], 
                y=[profile.bizygomatic_left[1]], 
                marker='+', c='orange')
    # vertical lines span the image height: mask.shape[0] is rows
    plt.plot(
        [profile.bizygomatic_right[0], profile.bizygomatic_right[0]],
        [0, profile.segmask.mask.shape[0]-1], 'ro-')
    plt.plot(
        [profile.bizygomatic_left[0], profile.bizygomatic_left[0]],
        [0, profile.segmask.mask.shape[0]-1], 'ro-')

    plt.scatter(x=[profile.upperfacial_top[0]], 
                y=[profile.upperfacial_top[1]],
                marker='+', c='red')
    # horizontal lines span the image width: mask.shape[1] is columns
    plt.plot(
        [0, profile.segmask.mask.shape[1]-1],
        [profile.upperfacial_top[1], profile.upperfacial_top[1]],
        'go-'
    )

    plt.scatter(x=[profile.upperfacial_bottom[0]], 
                y=[profile.upperfacial_bottom[1]],
                marker='+', c='red')
    plt.plot(
        [0, profile.segmask.mask.shape[1]-1],
        [profile.upperfacial_bottom[1], profile.upperfacial_bottom[1]], 'go-')
    plt.savefig(outimg, format='jpeg')
    
    row = profile.get_profile()
    row['model_id'] = mod_fp
    
    return row, plt
    

erics_img = Path('/ws/data/skin-tone/from_zenodo/Media/MediaForExport/')
ls = [fp for fp in erics_img.iterdir() if fp.suffix == '.jpg']  # plain-pathlib equivalent of fastai's Path.ls()
# row = truth.loc[fp.name].to_dict()
write_to = Path('/ws/data/skin-tone/output1')
summary = []

fp = ls[1]
outimg = write_to/'imgs'/(fp.stem+'.jpg')

try: del img
except NameError: pass
img = MaskedImg()
img.load_from_file(fn=fp)

try: del model
except NameError: pass

model = TrainedSegmentationModel(
    mod_fp=mod_fp,
    input_size=size,
    output_classes=output_classes
)

try: del profile
except NameError: pass


profile = FacialProfile(
    model=model, 
    img=img, 
    sampling_strategy='use_all', 
    align_face=True
)

plt.imshow(profile.img.img)
<matplotlib.image.AxesImage at 0x7f907811e6a0>

print(profile.bizygomatic_right)
profile.bizygomatic_left
(172.0, 83.0)
(92.0, 82.0)

def plot_all(save=False):
    plt.figure(figsize=(10,10))
    plt.imshow(profile.segmask.decoded_img.img)
    plt.imshow(
        skimage.color.label2rgb(np.array(profile.segmask.mask)), 
        alpha=0.3
    )
    plt.title('Computed fWHR based on Segmentation Only (not FaceMesh).\nfWHR: {}'.format(profile.fwhr))

    plt.scatter(x=[profile.bizygomatic_right[0]], 
                y=[profile.bizygomatic_right[1]], 
                marker='+', c='orange')
    plt.scatter(x=[profile.bizygomatic_left[0]], 
                y=[profile.bizygomatic_left[1]], 
                marker='+', c='orange')
    # vertical lines span the image height: mask.shape[0] is rows
    plt.plot(
        [profile.bizygomatic_right[0], profile.bizygomatic_right[0]],
        [0, profile.segmask.mask.shape[0]-1], 'ro-')
    plt.plot(
        [profile.bizygomatic_left[0], profile.bizygomatic_left[0]],
        [0, profile.segmask.mask.shape[0]-1], 'ro-')

    plt.scatter(x=[profile.upperfacial_top[0]], 
                y=[profile.upperfacial_top[1]],
                marker='+', c='red')
    # horizontal lines span the image width: mask.shape[1] is columns
    plt.plot(
        [0, profile.segmask.mask.shape[1]-1],
        [profile.upperfacial_top[1], profile.upperfacial_top[1]],
        'go-'
    )

    plt.scatter(x=[profile.upperfacial_bottom[0]], 
                y=[profile.upperfacial_bottom[1]],
                marker='+', c='red')
    plt.plot(
        [0, profile.segmask.mask.shape[1]-1],
        [profile.upperfacial_bottom[1], profile.upperfacial_bottom[1]], 'go-')
    if save:
        plt.savefig(outimg, format='jpeg')

    # Known exceptions seen during batch runs: ZeroDivisionError, RuntimeError
    row = profile.get_profile()
    row['model_id'] = mod_fp
    return row

summary = []
failures = []
for fp in ls[:10]:
    if fp.suffix != '.jpg':
        continue
    try:
        # avoid naming the result `plt`, which would shadow matplotlib.pyplot
        row, fig = run_summary(fp=fp, summary=summary)
    except Exception:
        failures.append(fp)
        continue
    if not row and not fig:
        failures.append(fp)
        continue

    summary.append(row)
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
(same path printed 11 times: the initial attempt plus 10 retries)
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
(same path printed 11 times: the initial attempt plus 10 retries)

failures
[Path('/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg'),
 Path('/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg')]

profile.get_profile()
{'rgb_of_Background/undefined': (107, 104, 104),
 'rgb_of_Lips': (132, 100, 110),
 'rgb_of_Nose': (168, 144, 140),
 'rgb_of_Hair': (50, 48, 52),
 'rgb_of_Ears': (56, 21, 34),
 'rgb_of_Eyebrows': (80, 73, 77),
 'rgb_of_Teeth': None,
 'rgb_of_General face': (118, 112, 112),
 'rgb_of_Facial hair': None,
 'rgb_of_Specs/sunglasses': None,
 'rgb_of_right_eye': (91, 87, 93),
 'rgb_of_left_eye': (80, 76, 85),
 'rgb_of_left_cheek': (173, 153, 150),
 'rgb_of_right_cheek': (148, 129, 126),
 'rgb_of_forehead': (180, 156, 148),
 'img_eye_slope': -0.0,
 'fwhr': 1.7222222222222223,
 'bizygomatic_w_px': 62.0,
 'upperfacial_h_px': 36.0,
 'tot_head_area_px': 10828,
 'pct_of_head_Lips': 0.012929442186922793,
 'pct_of_head_Nose': 0.025951237532323604,
 'pct_of_head_Hair': 0.17694865164388623,
 'pct_of_head_Ears': 0.0001847063169560399,
 'pct_of_head_Eyebrows': 0.010251200591060215,
 'pct_of_head_Teeth': 0.0,
 'pct_of_head_General face': 0.5391577391946805,
 'pct_of_head_Facial hair': 0.0,
 'pct_of_head_Specs/sunglasses': 0.0,
 'pct_of_head_right_eye': 0.004155892131510898,
 'pct_of_head_left_eye': 0.0034170668636867383,
 'pct_of_head_left_cheek': 0.05744366457332841,
 'pct_of_head_right_cheek': 0.05227188769855929,
 'pct_of_head_forehead': 0.11728851126708534}

plt.figure(figsize = (8,8))
plt.title('Original Image, Rotated and Shrunken to size={}'.format(size))
plt.imshow(profile.segmask.decoded_img.img)
<matplotlib.image.AxesImage at 0x7f6a093e5940>

plt.figure(figsize = (8,8))
plt.title('Enhanced Segmentation Model Output')
plt.imshow(profile.segmask.mask)
<matplotlib.image.AxesImage at 0x7f6a06a3a2e8>

Compute FaceMesh (Google)

profile.mesh.draw(thickness=1, circle_radius=1)
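MeshMask presumably wraps Google's MediaPipe FaceMesh. As a rough sketch of what such a wrapper does internally, assuming the standard mediapipe API (the real internals live in prcvd.img):

import cv2
import mediapipe as mp

def extract_landmarks(image_bgr):
    """Runs MediaPipe FaceMesh on a single BGR image; returns the first
    face's landmarks or None. Illustrative stand-in for MeshMask internals."""
    with mp.solutions.face_mesh.FaceMesh(static_image_mode=True,
                                         max_num_faces=1) as fm:
        results = fm.process(cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB))
    if not results.multi_face_landmarks:
        return None
    return results.multi_face_landmarks[0]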

plt.figure(figsize=(10,10))
plt.imshow(profile.segmask.decoded_img.img)
plt.imshow(
    skimage.color.label2rgb(np.array(profile.segmask.mask)), 
    alpha=0.3
)
plt.title('Computed fWHR based on Segmentation Only (not FaceMesh).\nfWHR: {}'.format(profile.fwhr))

plt.scatter(x=[profile.bizygomatic_right[0]], 
            y=[profile.bizygomatic_right[1]], 
            marker='+', c='orange')
plt.scatter(x=[profile.bizygomatic_left[0]], 
            y=[profile.bizygomatic_left[1]], 
            marker='+', c='orange')
plt.plot(
    [profile.bizygomatic_right[0], profile.bizygomatic_right[0]],
    [0, profile.segmask.mask.shape[0]-1], 'ro-')
plt.plot(
    [profile.bizygomatic_left[0], profile.bizygomatic_left[0]],
    [0, profile.segmask.mask.shape[0]-1], 'ro-')

plt.scatter(x=[profile.upperfacial_top[0]], 
            y=[profile.upperfacial_top[1]],
            marker='+', c='red')
plt.plot(
    [0, profile.segmask.mask.shape[1]-1],
    [profile.upperfacial_top[1], profile.upperfacial_top[1]],
    'go-'
)

plt.scatter(x=[profile.upperfacial_bottom[0]], 
            y=[profile.upperfacial_bottom[1]],
            marker='+', c='red')
plt.plot(
    [0, profile.segmask.mask.shape[1]-1],
    [profile.upperfacial_bottom[1], profile.upperfacial_bottom[1]], 'go-')
[<matplotlib.lines.Line2D at 0x7f0fd737a048>]

Plot all Models Together

plt.figure(figsize=(10,10))
plt.imshow(profile.segmask.decoded_img.img)
plt.imshow(
    skimage.color.label2rgb(np.array(profile.segmask.mask)), 
    alpha=0.3
)
plt.title('fWHR: {}'.format(profile.fwhr))

plt.scatter(x=[profile.bizygomatic_right[0]], 
            y=[profile.bizygomatic_right[1]], 
            marker='+', c='orange')
plt.scatter(x=[profile.bizygomatic_left[0]], 
            y=[profile.bizygomatic_left[1]], 
            marker='+', c='orange')
plt.plot(
    [profile.bizygomatic_right[0], profile.bizygomatic_right[0]],
    [0, profile.segmask.mask.shape[0]-1], 'ro-')
plt.plot(
    [profile.bizygomatic_left[0], profile.bizygomatic_left[0]],
    [0, profile.segmask.mask.shape[0]-1], 'ro-')

plt.scatter(x=[profile.upperfacial_top[0]], 
            y=[profile.upperfacial_top[1]],
            marker='+', c='red')
plt.plot(
    [0, profile.segmask.mask.shape[1]-1],
    [profile.upperfacial_top[1], profile.upperfacial_top[1]],
    'go-'
)

plt.scatter(x=[profile.upperfacial_bottom[0]], 
            y=[profile.upperfacial_bottom[1]],
            marker='+', c='red')
plt.plot(
    [0, profile.segmask.mask.shape[1]-1],
    [profile.upperfacial_bottom[1], profile.upperfacial_bottom[1]], 'go-')
[<matplotlib.lines.Line2D at 0x7f0fa570d5c0>]

(a1,b1) = profile.segmask.find_region_extrema(
            region='right_eye', direction='bottom'
        )
(a2,b2) = profile.segmask.find_region_extrema(
            region='left_eye', direction='bottom'
        )

# view region in isolation
plt.figure(figsize = (10,10))
grp = (profile.segmask.mask == 1).nonzero()
plt.gca().invert_yaxis()
plt.scatter(grp[:,1],grp[:,0],)
<matplotlib.collections.PathCollection at 0x7f79a92135f8>

Validation

Basic Validation of the Facial Segmentation Model and Added Regions

Original Image

plt.figure(figsize = (10,10))
plt.imshow(profile.segmask.decoded_img.img)
<matplotlib.image.AxesImage at 0x7f0fa584bb38>

Facial Segmentation Model Output Mask

plt.figure(figsize = (10,10))
plt.imshow(profile.segmask.mask)
<matplotlib.image.AxesImage at 0x7f0fd7e79588>

import torch

# plot centroid eye points
centroid_leye = profile.segmask.find_region_extrema(region='left_eye', direction='centroid')
centroid_reye = profile.segmask.find_region_extrema(region='right_eye', direction='centroid')
slope = (float(centroid_reye[1]) - centroid_leye[1]) / (centroid_reye[0] - centroid_leye[0])

plt.figure(figsize = (10,10))
plt.imshow(profile.segmask.mask)
plt.scatter(x=[centroid_leye[0]], y=[centroid_leye[1]], marker='+', c='black')
plt.scatter(x=[centroid_reye[0]], y=[centroid_reye[1]], marker='+', c='black')
plt.plot([centroid_leye[0], centroid_reye[0]], [centroid_leye[1], centroid_reye[1]], 'ro-')
flat_pt1 = centroid_leye
flat_pt2 = torch.Tensor([centroid_leye[0], centroid_reye[1]])
plt.scatter(x=[flat_pt2[0]], y=[flat_pt2[1]], marker='+', c='black')
plt.plot([centroid_leye[0], flat_pt2[0]], [centroid_leye[1], flat_pt2[1]], 'bo-')
[<matplotlib.lines.Line2D at 0x7f0f76d34048>]

Validating by Intuition on Single Samples and Color Swatches

Validate Nose Sample

# fp here is the last image processed above; pull its ground-truth LAB values
row = truth.loc[fp.name].to_dict()
left_cheek_truth = np.array([row['L_l'], row['a_l'], row['b_l']])
right_cheek_truth = np.array([row['L_r'], row['a_r'], row['b_r']])
left_cheek_truth_rgb = color.lab2rgb(left_cheek_truth.astype(float))
right_cheek_truth_rgb = color.lab2rgb(right_cheek_truth.astype(float))

pred_left_cheek_lab = abs(skimage.color.rgb2lab(profile.skin_tones['left_cheek'],
    illuminant='D50', observer='2'
)).astype('int')
pred_right_cheek_lab = abs(skimage.color.rgb2lab(
    profile.skin_tones['right_cheek'],
    illuminant='D50', observer='2'
)).astype('int')
print('predicted left cheek (LAB)', pred_left_cheek_lab)
print('predicted right cheek (LAB)', pred_right_cheek_lab)
print('true left cheek',left_cheek_truth)
print('true right cheek',right_cheek_truth)


print('left cheek error', calc_euclidean_error(pred_left_cheek_lab, left_cheek_truth))
print('right cheek error',calc_euclidean_error(pred_right_cheek_lab, right_cheek_truth))
predicted left cheek (LAB) [64  5  9]
predicted right cheek (LAB) [55  4  7]
true left cheek [68  8  4]
true right cheek [57  7  4]
left cheek error 7.0710678118654755
right cheek error 4.69041575982343
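For context, calc_euclidean_error here appears to be plain Euclidean distance in LAB space, i.e. the classic CIE76 color difference ΔE*ab; a minimal sketch under that assumption:

import numpy as np

def delta_e76(lab1, lab2):
    """CIE76 color difference: Euclidean distance between two LAB triples.
    Assumed equivalent of prcvd.img.calc_euclidean_error."""
    return float(np.linalg.norm(np.asarray(lab1, float) - np.asarray(lab2, float)))

# delta_e76([64, 5, 9], [68, 8, 4]) == sqrt(16 + 9 + 25) ≈ 7.07,
# matching the left cheek error printed above.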


region = 'Nose'
fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0), 1, 1,
        edgecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        facecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        fill=True
     )
)

ax.annotate('predicted - {}'.format(region), (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = right_cheek_truth_rgb,
        facecolor = right_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Right Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = left_cheek_truth_rgb,
        facecolor = left_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Left Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

Validate Left Cheek Sample


region = 'left_cheek'
fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        facecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        fill=True
     ) 
)
ax.annotate('predicted - {}'.format(region), (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = right_cheek_truth_rgb,
        facecolor = right_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Right Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = left_cheek_truth_rgb,
        facecolor = left_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Left Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

Validate Right Cheek Sample

region = 'right_cheek'
fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        facecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        fill=True
     ) 
)
ax.annotate('predicted - {}'.format(region), (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = right_cheek_truth_rgb,
        facecolor = right_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Right Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = left_cheek_truth_rgb,
        facecolor = left_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Left Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

Validate Forehead Sample

region = 'forehead'
fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        facecolor = (np.array(profile.skin_tones[region]).astype(float)) / 255,
        fill=True
     ) 
)
ax.annotate('predicted - {}'.format(region), (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = right_cheek_truth_rgb,
        facecolor = right_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Right Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

fig, ax = plt.subplots()
ax.add_patch(
     patches.Rectangle(
        (0, 0),1,1,
        edgecolor = left_cheek_truth_rgb,
        facecolor = left_cheek_truth_rgb,
        fill=True
     )
)

ax.annotate('Left Cheek Truth', (0.5, 0.5), color='w', weight='bold', 
                fontsize=18, ha='center', va='center')

plt.show()

Facial width-to-height ratio (fWHR)

# hor_lip: top of lip, parallel to hor_eye
# ver_rear: left side of right ear (subject perspective), orthogonal to hor_eye
# ver_lear: right side of left ear (subject perspective), orthogonal to hor_eye


# requires: right eye vs left eye
    # requires facial orientation transform (up side down -> right side up)

# Simplifying Assumptions
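The ratio itself is simple arithmetic once the landmarks are fixed. A minimal sketch using the point pairs FacialProfile already computes (the standalone function is illustrative):

def compute_fwhr(bizygomatic_left, bizygomatic_right,
                 upperfacial_top, upperfacial_bottom):
    """fWHR = bizygomatic width / upper facial height, both in pixels.
    Mirrors the arithmetic inside FacialProfile.__init__."""
    width = bizygomatic_right[0] - bizygomatic_left[0]    # horizontal (x) span
    height = upperfacial_bottom[1] - upperfacial_top[1]   # vertical (y) span
    return width / height

# e.g. a width of 62 px over a height of 36 px gives fWHR ≈ 1.72,
# matching the profile output shown earlier.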

import cv2
import imutils

Validation Summary Table for Metrics Against All Images

import traceback

# Assumed setup for this batch run: `out` accumulates one row per image, and
# the region tags match the error columns in the summary table below.
out = {}
tags = ['forehead', 'right_cheek', 'left_cheek', 'Nose']

for im in ls:
    try:
        if im in out: continue
        if im.suffix != '.jpg': continue
        colors_profile_lab = run_img(fn=im, tags=tags)
        row = truth.loc[im.name].to_dict()
        row['fp'] = im
        row['PostStatementPhotoFilename'] = im.name
        for tag in tags:
            row['Lab_{}'.format(tag)] = colors_profile_lab[tag]
            row['error_{}'.format(tag)] = calc_euclidean_error(
                colors_profile_lab[tag],
                np.array([row['L_m'], row['a_m'], row['b_m']])
            )
        out[im] = row
    except KeyboardInterrupt:
        break
    except Exception:
        traceback.print_exc()
        

df = pd.DataFrame(out.values())

df[['error_forehead','error_right_cheek','error_left_cheek','error_Nose']].sum()
error_forehead       4201.102983
error_right_cheek    4253.485379
error_left_cheek     3998.065089
error_Nose           6192.452324
dtype: float64
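Dividing by the 203 rows puts these totals in per-image terms (a quick check against the same DataFrame):

# Mean per-image LAB error by region:
df[['error_forehead', 'error_right_cheek', 'error_left_cheek', 'error_Nose']].mean()
# forehead ≈ 20.7, right cheek ≈ 21.0, left cheek ≈ 19.7, nose ≈ 30.5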

df.to_csv('early_look_validation.csv')
df
L_l a_l b_l L_r a_r b_r Leftcheeksize_sq Rightcheeksize_sq coloration_notes L_m ... fp PostStatementPhotoFilename Lab_forehead error_forehead Lab_right_cheek error_right_cheek Lab_left_cheek error_left_cheek Lab_Nose error_Nose
0 67 13 4 51 12 3 51 51 NaN 59.0 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-04-16h01m54s241.jpg vlcsnap-2020-09-04-16h01m54s241.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 21.399000 [55.425016610496215, 11.704792912227369, -14.396626105998145] 18.267515 [63.52683332635404, 11.380539178516557, -14.996442437544054] 19.075214 [83.42976058083897, 12.153559972545857, -19.33626419328327] 33.442909
1 72 14 -4 83 13 -2 51 51 NaN 77.5 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-05-13h02m33s216.jpg vlcsnap-2020-09-05-13h02m33s216.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 16.175225 [55.425016610496215, 11.704792912227369, -14.396626105998145] 24.908046 [63.52683332635404, 11.380539178516557, -14.996442437544054] 18.537965 [83.42976058083897, 12.153559972545857, -19.33626419328327] 17.431250
2 64 8 7 59 6 5 51 51 NaN 61.5 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-04-11h55m33s741.jpg vlcsnap-2020-09-04-11h55m33s741.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 22.577631 [55.425016610496215, 11.704792912227369, -14.396626105998145] 21.795937 [63.52683332635404, 11.380539178516557, -14.996442437544054] 21.544089 [83.42976058083897, 12.153559972545857, -19.33626419328327] 33.902800
3 70 16 1 63 14 0 51 51 NaN 66.5 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-05-15h44m56s025.jpg vlcsnap-2020-09-05-15h44m56s025.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 17.167994 [55.425016610496215, 11.704792912227369, -14.396626105998145] 18.852669 [63.52683332635404, 11.380539178516557, -14.996442437544054] 16.188883 [83.42976058083897, 12.153559972545857, -19.33626419328327] 26.233498
4 59 14 5 55 15 6 51 51 NaN 57.0 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-05-12h50m12s634.jpg vlcsnap-2020-09-05-12h50m12s634.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 24.435819 [55.425016610496215, 11.704792912227369, -14.396626105998145] 20.153647 [63.52683332635404, 11.380539178516557, -14.996442437544054] 21.735564 [83.42976058083897, 12.153559972545857, -19.33626419328327] 36.343886
... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ... ...
198 73 14 -3 82 14 -3 51 51 NaN 77.5 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-05-13h00m37s394.jpg vlcsnap-2020-09-05-13h00m37s394.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 16.323961 [55.425016610496215, 11.704792912227369, -14.396626105998145] 24.949067 [63.52683332635404, 11.380539178516557, -14.996442437544054] 18.601763 [83.42976058083897, 12.153559972545857, -19.33626419328327] 17.476983
199 71 16 -3 67 15 -1 51 51 NaN 69.0 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-05-19h12m51s833.jpg vlcsnap-2020-09-05-19h12m51s833.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 15.000087 [55.425016610496215, 11.704792912227369, -14.396626105998145] 18.771258 [63.52683332635404, 11.380539178516557, -14.996442437544054] 14.691257 [83.42976058083897, 12.153559972545857, -19.33626419328327] 22.802691
200 56 8 6 63 8 6 51 51 NaN 59.5 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-04-13h21m27s855.jpg vlcsnap-2020-09-04-13h21m27s855.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 23.193619 [55.425016610496215, 11.704792912227369, -14.396626105998145] 21.127076 [63.52683332635404, 11.380539178516557, -14.996442437544054] 21.644723 [83.42976058083897, 12.153559972545857, -19.33626419328327] 35.097176
201 68 8 3 58 7 3 51 51 NaN 63.0 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-05-13h16m00s788.jpg vlcsnap-2020-09-05-13h16m00s788.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 19.243649 [55.425016610496215, 11.704792912227369, -14.396626105998145] 19.434589 [63.52683332635404, 11.380539178516557, -14.996442437544054] 18.417602 [83.42976058083897, 12.153559972545857, -19.33626419328327] 30.625797
202 78 8 6 71 7 4 51 51 NaN 74.5 ... /ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/vlcsnap-2020-09-05-11h35m03s917.jpg vlcsnap-2020-09-05-11h35m03s917.jpg [68.25674162237833, 8.91622504113837, -15.457491770822251] 21.435786 [55.425016610496215, 11.704792912227369, -14.396626105998145] 27.527520 [63.52683332635404, 11.380539178516557, -14.996442437544054] 23.137128 [83.42976058083897, 12.153559972545857, -19.33626419328327] 26.337236

203 rows × 22 columns

Conclusions

In Summation

Next Steps

  • The segmentation model's accuracy is roughly 92% across the training set, so some per-image errors are expected. Pairing it with a face-box (face detection) model, which isolates the face from the rest of the picture before segmentation, reportedly improves accuracy considerably; a sketch of that step follows.
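A minimal sketch of that face-box preprocessing step, using OpenCV's bundled Haar cascade (the crop margin and the integration point in the pipeline are assumptions; any face detector would do):

import cv2

def crop_to_face(image_bgr, margin=0.25):
    """Crops the input to the first detected face, padded by `margin`.
    Sketch of the proposed face-box step; not part of the current pipeline."""
    cascade = cv2.CascadeClassifier(
        cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
    gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY)
    faces = cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    if len(faces) == 0:
        return image_bgr  # fall back to the full frame
    x, y, w, h = faces[0]
    pad_x, pad_y = int(w * margin), int(h * margin)
    y0, y1 = max(0, y - pad_y), min(image_bgr.shape[0], y + h + pad_y)
    x0, x1 = max(0, x - pad_x), min(image_bgr.shape[1], x + w + pad_x)
    return image_bgr[y0:y1, x0:x1]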