Introduction

Objective

The simple first use case for the facial profiling technology we developed is the ability to submit a collection of portraits and receive back the facial profiles and the labeled input images. The idea is to make it easy for social science researchers to control for facial profile. For example, when designing subject cohorts, an experimental economics or psychology researcher may want to pair up subjects who have maximally different facial skin tones or facial width-to-height ratios. This service would let them either build that assignment directly into their computerized experiment, or at least remove the manual work needed to obtain those measurements today.
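As a rough sketch of how a researcher might use the returned profiles for cohort assignment, the snippet below greedily pairs subjects so that the fWHR difference within each pair is as large as possible. The subject IDs and values are made up; the 'fwhr' key mirrors the profile output produced later in this notebook.

# Hypothetical profiles shaped like the output of FacialProfile.get_profile()
profiles = {
    'subj_a': {'fwhr': 1.72},
    'subj_b': {'fwhr': 1.95},
    'subj_c': {'fwhr': 1.60},
    'subj_d': {'fwhr': 1.88},
}

# Greedy pairing: repeatedly match the lowest and highest remaining fWHR
remaining = sorted(profiles, key=lambda s: profiles[s]['fwhr'])
pairs = []
while len(remaining) >= 2:
    pairs.append((remaining.pop(0), remaining.pop(-1)))
print(pairs)  # [('subj_c', 'subj_b'), ('subj_a', 'subj_d')]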

Installation of UbiOps

import sys
!{sys.executable} -m pip install ubiops
Requirement already satisfied: ubiops in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (3.1.0)
Requirement already satisfied: certifi in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (2020.6.20)
Requirement already satisfied: six>=1.10 in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (1.15.0)
Requirement already satisfied: python-dateutil in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (2.8.1)
Requirement already satisfied: urllib3>=1.15 in /Users/home0/.pyenv/versions/3.7.8/lib/python3.7/site-packages (from ubiops) (1.25.11)

Create a service user

import os
import json
from pathlib import Path
with open(Path(os.getenv('HOME'))/'.ubiops/ubiops.json') as f:
    data = json.load(f)

import ubiops

configuration = ubiops.Configuration()
configuration.api_key['Authorization'] = data['API_TOKEN']

client = ubiops.ApiClient(configuration)
api = ubiops.api.CoreApi(client)

print(api.service_status())
client.close()
{'status': 'ok'}

API_TOKEN = data['API_TOKEN']
PROJECT_NAME = 'facial-profile'
DEPLOYMENT_NAME = 'simple-tutorial'
DEPLOYMENT_VERSION = 'v1'

configuration = ubiops.Configuration()
configuration.api_key['Authorization'] = API_TOKEN

client = ubiops.ApiClient(configuration)
api = ubiops.CoreApi(client)
api.service_status()
{'status': 'ok'}
# deployment_template = ubiops.DeploymentCreate(
#     name=DEPLOYMENT_NAME,
#     description='A simple deployment that multiplies the input float by a random number.',
#     input_type='structured',
#     output_type='structured',
#     input_fields=[ubiops.DeploymentInputFieldCreate(name='input', data_type='double')],
#     output_fields=[ubiops.DeploymentOutputFieldCreate(name='output', data_type='double')]
# )
# 
# deployment = api.deployments_create(project_name=PROJECT_NAME, data=deployment_template)
# print(deployment)
# version_template = ubiops.VersionCreate(
#     version=DEPLOYMENT_VERSION,
#     language='python3.7',
#     memory_allocation=256,
#     maximum_instances=1,
#     minimum_instances=0,
#     maximum_idle_time=1800 # = 30 minutes
# )
# 
# version = api.versions_create(
#     project_name=PROJECT_NAME,
#     deployment_name=DEPLOYMENT_NAME,
#     data=version_template
# )
# print(version)
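Once the deployment and version above have been created (the creation calls are commented out here), a request could be sent to it roughly as follows. This is only a sketch: the exact request method and response fields may differ between ubiops client versions.

# Sketch only: send one structured request to the example deployment above
request = api.deployment_requests_create(
    project_name=PROJECT_NAME,
    deployment_name=DEPLOYMENT_NAME,
    data={'input': 123.45}
)
print(request.result)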

Code

%matplotlib inline

from pathlib import Path
# import random
# 
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import pandas as pd
import numpy as np
import skimage
from skimage import color

from prcvd.img import (
    compute_theta, apply_rgb2lab, calc_euclidean_error, MaskedImg, 
    LabelMask, FaceMask,  MeshMask, TrainedSegmentationModel
)

from prcvd.core import IndexerDict

## shared with training
def get_y_fn(fp):
    """Maps an image path to its integer-label mask path.

    Relies on the img_to_l image-to-label mapping defined alongside the training code.
    """
    l_str = str(img_to_l[fp])
    out = l_str \
        .replace('labels', 'labels_int') \
        .replace('png', 'tif')
    return out


    
class FacialProfile:
    def __init__(self, img, model, sampling_strategy='use_all', align_face=True):
        self.segmask = None
        self.eye_slope_threshold = 0.50 #degrees
        self.model = model
        self.img = img
        self.eye_slope = np.nan
        self.apply_rotation(angle=0.0)
        self.fix_rotation()
        self.mesh = MeshMask(img=self.segmask.decoded_img)
        self.skin_tones = self.compute_skintones(
            sampling_strategy=sampling_strategy
        )
        self.tot_head_area, self.face_areas = self.create_area_features()

        self.bizygomatic_left, self.bizygomatic_right, self.bizygomatic_dist = \
            self.compute_bizygomatic_width()
    
        self.upperfacial_top, self.upperfacial_bottom, self.upperfacial_dist = \
            self.compute_upperfacial_height()
        
        self.fwhr = self.bizygomatic_dist / self.upperfacial_dist
        # self.modeled_img_fp = write_to/'imgs'/(fp.stem+'.jpg') #TODO
    
    def get_profile(self):
        """Returns the face profile object."""
        tones = {'rgb_of_{}'.format(k):v for k,v in self.skin_tones.items()}
        other = {
            'img_rot_degrees': self.img.rotation_degrees,
            'img_num_rotations': self.img.num_rotations,
            'img_eye_slope': self.eye_slope,
            'fwhr': self.fwhr,
            'bizygoatic_w_px': self.bizygomatic_dist,
            'upperfacial_h_px': self.upperfacial_dist,
            'tot_head_area_px': self.tot_head_area
        }
        out = {**tones, **other}
        out = {**out, **self.face_areas}
        return out
    
    
    def apply_rotation(self, angle):
        """Applies a rotation by angle (in degrees) to self.img"""
        if angle != 0.0:
            self.img.rotate(angle)
            
        self.segmask = FaceMask(img=self.img, model=self.model)
        self.create_eye_features()
        # TODO: add check to see if the face rotation worked out
        self.create_secondary_features()
    
    
    def fix_rotation(self):
        """Loops over rotations until the image is corrected."""
        while abs(self.theta_degrees) > self.eye_slope_threshold:
            self.apply_rotation(angle=self.theta_degrees)
        
    
    def create_area_features(self):
        nots = ['Background/undefined']
        total_area = {
            lab: np.count_nonzero(self.segmask.mask == code) 
            for lab, code in self.segmask.label_to_code.items()
            if lab not in nots
        }
        s = sum(total_area.values())
        total_area = {'pct_of_head_{}'.format(k): v/s for k,v in total_area.items()}
        return s, total_area
    
    
    def create_eye_features(self):
        """Writes the eye features."""
        self.segmask.add_eyes()
        self.eye_slope = self.segmask.compute_eye_slope()          
        if self.eye_slope > 0.0:            
            self.theta_degrees = \
                -1.0*compute_theta(slope1=self.eye_slope, slope2=0.0)
            
        elif self.eye_slope < 0.0:
            self.theta_degrees = \
                compute_theta(slope2=self.eye_slope, slope1=0.0)
        
        elif self.eye_slope == 0.0:
            self.theta_degrees = \
                compute_theta(slope2=self.eye_slope, slope1=0.0)
        
        
    def create_secondary_features(self):
        """Writes the secondary facial features, dependent on reference features."""
        self.segmask.add_cheeks()
        self.segmask.add_forehead()

    
    def compute_skintones(self, sampling_strategy, thresh=None):
        """Computes skin tones for all regions"""
        if thresh:
            self.segmask.calc_new_decision(thresh=thresh)
        
        return {
            region: self.segmask.calc_region_color(
                region=region, 
                sampling_strategy=sampling_strategy
            )
            for region in self.segmask.label_to_code.keys()
        }
    
    
    def compute_bizygomatic_width(self):
        """"""
        # Sources: 
        # https://carta.anthropogeny.org/moca/topics/upper-facial-height

        right_eye_right = self.segmask.find_region_extrema(
            region='right_eye',direction='right',
        )
        right_cheek_right = self.segmask.find_region_extrema(
            region='right_cheek',direction='right',
        )
        bizygomatic_right = (float(right_cheek_right[0]), float(right_eye_right[1]))

        left_eye_left = self.segmask.find_region_extrema(
            region='left_eye',direction='left',
        )
        left_cheek_left = self.segmask.find_region_extrema(
            region='left_cheek',direction='left',
        )
        bizygomatic_left = (float(left_cheek_left[0]), float(left_eye_left[1]))
        bizygomatic_dist = bizygomatic_right[0] - bizygomatic_left[0]

        return bizygomatic_left, bizygomatic_right, bizygomatic_dist
        
        
    def compute_upperfacial_height(self):
        """"""
        upperfacial_top = self.segmask.find_region_extrema(
            region='Eyebrows',direction='bottom',
        )
        upperfacial_bottom = self.segmask.find_region_extrema(
            region='Lips',direction='top',
        )
        upperfacial_top = (float(upperfacial_top[0]), float(upperfacial_top[1]))
        upperfacial_bottom = (float(upperfacial_bottom[0]), float(upperfacial_bottom[1]))
        upperfacial_dist = upperfacial_bottom[1] - upperfacial_top[1]
        
        return upperfacial_top, upperfacial_bottom, upperfacial_dist

        
    def get_skintones_in_lab(self):
        """
        Returns skin tones in LAB colors
        """
        return {
            k: apply_rgb2lab(v) 
            for k, v in self.skin_tones.items()
        }

path = Path("/ws/data/skin-tone/headsegmentation_dataset_ccncsa")
mod_dir = path/'Models'
mod_fp = mod_dir/'checkpoint_20201007'
# TODO: figure out how to get the labels from the trained model...
output_classes = ['Background/undefined', 'Lips', 'Eyes', 'Nose', 'Hair', 
  'Ears', 'Eyebrows', 'Teeth', 'General face', 'Facial hair',
  'Specs/sunglasses']
size = 224

truth_fp = Path('/ws/data/skin-tone/ScreenshotFaceAfterStatement/erics_imgs/MasterFacesDatabaseLabels.xlsx - labels.csv') 
truth = pd.read_csv(truth_fp)
truth.index = truth['PostStatementPhotoFilename']
del truth['PostStatementPhotoFilename']
truth['L_m'] = (truth['L_l'] + truth['L_r']) / 2
truth['a_m'] = (truth['a_l'] + truth['a_r']) / 2
truth['b_m'] = (truth['b_l'] + truth['b_r']) / 2
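The truth file holds hand-labeled LAB values per photo, with L_m/a_m/b_m as the left/right means computed above. The function below is a sketch of how a computed profile could be scored against those labels, assuming the labels correspond to the cheek regions and that apply_rgb2lab maps an RGB tuple to (L, a, b) as it is used in FacialProfile.get_skintones_in_lab.

def lab_error(profile, fp):
    """Euclidean error between the mean LAB cheek tone and the hand label for fp."""
    cheek = np.mean(
        [apply_rgb2lab(profile.skin_tones['left_cheek']),
         apply_rgb2lab(profile.skin_tones['right_cheek'])],
        axis=0,
    )
    labeled = truth.loc[fp.name][['L_m', 'a_m', 'b_m']].to_numpy(dtype=float)
    return float(np.linalg.norm(cheek - labeled))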

def run_summary(fp, summary, attempt=1, num_attempts=10):
    print(fp)
    outimg = write_to/'imgs'/(fp.stem+'.jpg')
    
    if outimg.exists():
        return None, None

    try: del img
    except: pass
    img = MaskedImg()
    img.load_from_file(fn=fp)

    try: del model
    except: pass

    model = TrainedSegmentationModel(
        mod_fp=mod_fp, 
        input_size=size,
        output_classes=output_classes
    )

    try: del profile
    except: pass
    
    try:
        profile = FacialProfile(
            model=model, 
            img=img, 
            sampling_strategy='use_all', 
            align_face=True
        )
    except:
        if not attempt > num_attempts:
            return run_summary(
                fp=fp, 
                summary=summary,
                attempt=attempt+1,
                num_attempts=num_attempts
            )
        else:
            return None, None
    
    plt.figure(figsize=(10,10))
    plt.imshow(profile.segmask.decoded_img.img)
    plt.imshow(
        skimage.color.label2rgb(np.array(profile.segmask.mask)), 
        alpha=0.3
    )
    plt.title('Computed fWHR based on Segmentation Only (not FaceMesh).\nfWHR: {}'.format(profile.fwhr))

    plt.scatter(x=[profile.bizygomatic_right[0]], 
                y=[profile.bizygomatic_right[1]], 
                marker='+', c='orange')
    plt.scatter(x=[profile.bizygomatic_left[0]], 
                y=[profile.bizygomatic_left[1]], 
                marker='+', c='orange')
    plt.plot(
        [profile.bizygomatic_right[0], profile.bizygomatic_right[0]], 
        [0, profile.segmask.mask.shape[1]-1],'ro-')
    plt.plot(
        [profile.bizygomatic_left[0], profile.bizygomatic_left[0]], 
        [0, profile.segmask.mask.shape[1]-1],'ro-')

    plt.scatter(x=[profile.upperfacial_top[0]], 
                y=[profile.upperfacial_top[1]],
                marker='+', c='red')
    plt.plot(
        [0, profile.segmask.mask.shape[0]-1], 
        [profile.upperfacial_top[1], profile.upperfacial_top[1]],
        'go-'
    )

    plt.scatter(x=[profile.upperfacial_bottom[0]], 
                y=[profile.upperfacial_bottom[1]],
                marker='+', c='red')
    plt.plot(
        [0, profile.segmask.mask.shape[0]-1], 
        [profile.upperfacial_bottom[1], profile.upperfacial_bottom[1]], 'go-')
    plt.savefig(outimg, format='jpeg')
    
    row = profile.get_profile()
    row['model_id'] = mod_fp
    
    return row, plt
    

erics_img = Path('/ws/data/skin-tone/from_zenodo/Media/MediaForExport/')
# Note: Path.ls() is not standard pathlib; it assumes the fastcore/fastai pathlib patch is available in this environment.
ls = [fp for fp in list(erics_img.ls()) if str(fp)[-4:] == '.jpg']
# row = truth.loc[fp.name].to_dict()
write_to = Path('/ws/data/skin-tone/output1')
summary = []

fp = ls[1]
outimg = write_to/'imgs'/(fp.stem+'.jpg')

try: del img
except: pass
img = MaskedImg()
img.load_from_file(fn=fp)

try: del model
except: pass

model = TrainedSegmentationModel(
    mod_fp=mod_fp, 
    input_size=size,
    output_classes=output_classes
)

try: del profile
except: pass


profile = FacialProfile(
    model=model, 
    img=img, 
    sampling_strategy='use_all', 
    align_face=True
)

plt.imshow(profile.img.img)
<matplotlib.image.AxesImage at 0x7f907811e6a0>

print(profile.bizygomatic_right)
profile.bizygomatic_left
(172.0, 83.0)
(92.0, 82.0)

def plot_all(save=False):
    plt.figure(figsize=(10,10))
    plt.imshow(profile.segmask.decoded_img.img)
    plt.imshow(
        skimage.color.label2rgb(np.array(profile.segmask.mask)), 
        alpha=0.3
    )
    plt.title('Computed fWHR based on Segmentation Only (not FaceMesh).\nfWHR: {}'.format(profile.fwhr))

    plt.scatter(x=[profile.bizygomatic_right[0]], 
                y=[profile.bizygomatic_right[1]], 
                marker='+', c='orange')
    plt.scatter(x=[profile.bizygomatic_left[0]], 
                y=[profile.bizygomatic_left[1]], 
                marker='+', c='orange')
    plt.plot(
        [profile.bizygomatic_right[0], profile.bizygomatic_right[0]], 
        [0, profile.segmask.mask.shape[1]-1],'ro-')
    plt.plot(
        [profile.bizygomatic_left[0], profile.bizygomatic_left[0]], 
        [0, profile.segmask.mask.shape[1]-1],'ro-')

    plt.scatter(x=[profile.upperfacial_top[0]], 
                y=[profile.upperfacial_top[1]],
                marker='+', c='red')
    plt.plot(
        [0, profile.segmask.mask.shape[0]-1], 
        [profile.upperfacial_top[1], profile.upperfacial_top[1]],
        'go-'
    )

    plt.scatter(x=[profile.upperfacial_bottom[0]], 
                y=[profile.upperfacial_bottom[1]],
                marker='+', c='red')
    plt.plot(
        [0, profile.segmask.mask.shape[0]-1], 
        [profile.upperfacial_bottom[1], profile.upperfacial_bottom[1]], 'go-')
    plt.savefig(outimg, format='jpeg')

    row = profile.get_profile()
    row['model_id'] = mod_fp

    ##Exceptions
    # ZeroDivisionError, RuntimeError

summary = []
failures = []
for fp in ls[:10]:
    if str(fp)[-4:] != '.jpg': continue
    try:
        row, plt = run_summary(fp=fp, summary=summary)
    except:
        failures.append(fp)
        continue
    if not row and not plt:
        failures.append(fp)
        continue
    
    summary.append(row)
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg
/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg

failures
[Path('/ws/data/skin-tone/from_zenodo/Media/MediaForExport/3e9d3a9c-c62d-4e17-b370-ff1b8c0a16c4.jpg'),
 Path('/ws/data/skin-tone/from_zenodo/Media/MediaForExport/a9f8a933-3428-4213-bfac-30b6a8c8daca.jpg')]
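After the loop, summary holds one profile dict per successfully processed image. A natural next step (not part of the original run) is to collect the rows into a single table and persist it; the output file name below is illustrative.

summary_df = pd.DataFrame(summary)
summary_df.to_csv(write_to/'facial_profiles.csv', index=False)
summary_df.head()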

profile.get_profile()
{'rgb_of_Background/undefined': (107, 104, 104),
 'rgb_of_Lips': (132, 100, 110),
 'rgb_of_Nose': (168, 144, 140),
 'rgb_of_Hair': (50, 48, 52),
 'rgb_of_Ears': (56, 21, 34),
 'rgb_of_Eyebrows': (80, 73, 77),
 'rgb_of_Teeth': None,
 'rgb_of_General face': (118, 112, 112),
 'rgb_of_Facial hair': None,
 'rgb_of_Specs/sunglasses': None,
 'rgb_of_right_eye': (91, 87, 93),
 'rgb_of_left_eye': (80, 76, 85),
 'rgb_of_left_cheek': (173, 153, 150),
 'rgb_of_right_cheek': (148, 129, 126),
 'rgb_of_forehead': (180, 156, 148),
 'img_eye_slope': -0.0,
 'fwhr': 1.7222222222222223,
 'bizygoatic_w_px': 62.0,
 'upperfacial_h_px': 36.0,
 'tot_head_area_px': 10828,
 'pct_of_head_Lips': 0.012929442186922793,
 'pct_of_head_Nose': 0.025951237532323604,
 'pct_of_head_Hair': 0.17694865164388623,
 'pct_of_head_Ears': 0.0001847063169560399,
 'pct_of_head_Eyebrows': 0.010251200591060215,
 'pct_of_head_Teeth': 0.0,
 'pct_of_head_General face': 0.5391577391946805,
 'pct_of_head_Facial hair': 0.0,
 'pct_of_head_Specs/sunglasses': 0.0,
 'pct_of_head_right_eye': 0.004155892131510898,
 'pct_of_head_left_eye': 0.0034170668636867383,
 'pct_of_head_left_cheek': 0.05744366457332841,
 'pct_of_head_right_cheek': 0.05227188769855929,
 'pct_of_head_forehead': 0.11728851126708534}
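The RGB tones above can also be converted to LAB (the color space used by the hand labels) with the apply_rgb2lab helper imported earlier. Regions with no pixels (e.g. Teeth above) are skipped here since their RGB value is None; whether apply_rgb2lab itself accepts None is not assumed.

lab_tones = {
    k: apply_rgb2lab(v)
    for k, v in profile.skin_tones.items()
    if v is not None
}
print(lab_tones['forehead'])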

plt.figure(figsize = (8,8))
plt.title('Original Image, Rotated and Shrunken to size={}'.format(size))
plt.imshow(profile.segmask.decoded_img.img)
<matplotlib.image.AxesImage at 0x7f6a093e5940>

plt.figure(figsize = (8,8))
plt.title('Enhanced Segmentation Model Output')
plt.imshow(profile.segmask.mask)
<matplotlib.image.AxesImage at 0x7f6a06a3a2e8>

Compute FaceMesh (Google)

profile.mesh.draw(thickness=1, circle_radius=1)

plt.figure(figsize=(10,10))
plt.imshow(profile.segmask.decoded_img.img)
plt.imshow(
    skimage.color.label2rgb(np.array(profile.segmask.mask)), 
    alpha=0.3
)
plt.title('Computed fWHR based on Segmentation Only (not FaceMesh).\nfWHR: {}'.format(profile.fwhr))

plt.scatter(x=[profile.bizygomatic_right[0]], 
            y=[profile.bizygomatic_right[1]], 
            marker='+', c='orange')
plt.scatter(x=[profile.bizygomatic_left[0]], 
            y=[profile.bizygomatic_left[1]], 
            marker='+', c='orange')
plt.plot(
    [profile.bizygomatic_right[0], profile.bizygomatic_right[0]], 
    [0, profile.segmask.mask.shape[1]-1],'ro-')
plt.plot(
    [profile.bizygomatic_left[0], profile.bizygomatic_left[0]], 
    [0, profile.segmask.mask.shape[1]-1],'ro-')

plt.scatter(x=[profile.upperfacial_top[0]], 
            y=[profile.upperfacial_top[1]],
            marker='+', c='red')
plt.plot(
    [0, profile.segmask.mask.shape[0]-1], 
    [profile.upperfacial_top[1], profile.upperfacial_top[1]],
    'go-'
)

plt.scatter(x=[profile.upperfacial_bottom[0]], 
            y=[profile.upperfacial_bottom[1]],
            marker='+', c='red')
plt.plot(
    [0, profile.segmask.mask.shape[0]-1], 
    [profile.upperfacial_bottom[1], profile.upperfacial_bottom[1]], 'go-')
/opt/conda/lib/python3.6/site-packages/ipykernel_launcher.py:4: FutureWarning: The new recommended value for bg_label is 0. Until version 0.19, the default bg_label value is -1. From version 0.19, the bg_label default value will be 0. To avoid this warning, please explicitly set bg_label value.
  after removing the cwd from sys.path.
[<matplotlib.lines.Line2D at 0x7f0fd737a048>]