auto cropping now works with non square crops

captin411 2022-10-23 04:11:07 -07:00 committed by GitHub
parent 0ddaf8d202
commit 1be5933ba2
1 changed file with 269 additions and 240 deletions


@@ -1,241 +1,270 @@
import cv2
from collections import defaultdict
from math import log, sqrt
import numpy as np
from PIL import Image, ImageDraw

GREEN = "#0F0"
BLUE = "#00F"
RED = "#F00"


def crop_image(im, settings):
    """ Intelligently crop an image to the subject matter """

    scale_by = 1
    if is_landscape(im.width, im.height):
        scale_by = settings.crop_height / im.height
    elif is_portrait(im.width, im.height):
        scale_by = settings.crop_width / im.width
    elif is_square(im.width, im.height):
        if is_square(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_landscape(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_width / im.width
        elif is_portrait(settings.crop_width, settings.crop_height):
            scale_by = settings.crop_height / im.height

    im = im.resize((int(im.width * scale_by), int(im.height * scale_by)))

    if im.width == settings.crop_width and im.height == settings.crop_height:
        if settings.annotate_image:
            d = ImageDraw.Draw(im)
            rect = [0, 0, im.width, im.height]
            rect[2] -= 1
            rect[3] -= 1
            d.rectangle(rect, outline=GREEN)
            if settings.destop_view_image:
                im.show()
        return im

    focus = focal_point(im, settings)

    # take the focal point and turn it into crop coordinates that try to center over the focal
    # point but then get adjusted back into the frame
    y_half = int(settings.crop_height / 2)
    x_half = int(settings.crop_width / 2)

    x1 = focus.x - x_half
    if x1 < 0:
        x1 = 0
    elif x1 + settings.crop_width > im.width:
        x1 = im.width - settings.crop_width

    y1 = focus.y - y_half
    if y1 < 0:
        y1 = 0
    elif y1 + settings.crop_height > im.height:
        y1 = im.height - settings.crop_height

    x2 = x1 + settings.crop_width
    y2 = y1 + settings.crop_height

    crop = [x1, y1, x2, y2]

    if settings.annotate_image:
        d = ImageDraw.Draw(im)
        rect = list(crop)
        rect[2] -= 1
        rect[3] -= 1
        d.rectangle(rect, outline=GREEN)
        if settings.destop_view_image:
            im.show()

    return im.crop(tuple(crop))


def focal_point(im, settings):
    corner_points = image_corner_points(im, settings)
    entropy_points = image_entropy_points(im, settings)
    face_points = image_face_points(im, settings)

    total_points = len(corner_points) + len(entropy_points) + len(face_points)

    corner_weight = settings.corner_points_weight
    entropy_weight = settings.entropy_points_weight
    face_weight = settings.face_points_weight

    weight_pref_total = corner_weight + entropy_weight + face_weight

    # weight things
    pois = []
    if weight_pref_total == 0 or total_points == 0:
        return pois

    pois.extend(
        [ PointOfInterest( p.x, p.y, weight=p.weight * ( (corner_weight/weight_pref_total) / (len(corner_points)/total_points) )) for p in corner_points ]
    )
    pois.extend(
        [ PointOfInterest( p.x, p.y, weight=p.weight * ( (entropy_weight/weight_pref_total) / (len(entropy_points)/total_points) )) for p in entropy_points ]
    )
    pois.extend(
        [ PointOfInterest( p.x, p.y, weight=p.weight * ( (face_weight/weight_pref_total) / (len(face_points)/total_points) )) for p in face_points ]
    )

    average_point = poi_average(pois, settings)

    if settings.annotate_image:
        d = ImageDraw.Draw(im)
        for f in face_points:
            d.rectangle(f.bounding(f.size), outline=RED)
        for f in entropy_points:
            d.rectangle(f.bounding(30), outline=BLUE)
        for poi in pois:
            w = max(4, 4 * 0.5 * sqrt(poi.weight))
            d.ellipse(poi.bounding(w), fill=BLUE)
        d.ellipse(average_point.bounding(25), outline=GREEN)

    return average_point


def image_face_points(im, settings):
    np_im = np.array(im)
    gray = cv2.cvtColor(np_im, cv2.COLOR_BGR2GRAY)

    tries = [
        [ f'{cv2.data.haarcascades}haarcascade_eye.xml', 0.01 ],
        [ f'{cv2.data.haarcascades}haarcascade_frontalface_default.xml', 0.05 ],
        [ f'{cv2.data.haarcascades}haarcascade_profileface.xml', 0.05 ],
        [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt.xml', 0.05 ],
        [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt2.xml', 0.05 ],
        [ f'{cv2.data.haarcascades}haarcascade_frontalface_alt_tree.xml', 0.05 ],
        [ f'{cv2.data.haarcascades}haarcascade_eye_tree_eyeglasses.xml', 0.05 ],
        [ f'{cv2.data.haarcascades}haarcascade_upperbody.xml', 0.05 ]
    ]

    for t in tries:
        # print(t[0])
        classifier = cv2.CascadeClassifier(t[0])
        minsize = int(min(im.width, im.height) * t[1])  # at least N percent of the smallest side
        try:
            faces = classifier.detectMultiScale(gray, scaleFactor=1.1,
                minNeighbors=7, minSize=(minsize, minsize), flags=cv2.CASCADE_SCALE_IMAGE)
        except:
            continue

        if len(faces) > 0:
            rects = [[f[0], f[1], f[0] + f[2], f[1] + f[3]] for f in faces]
            return [PointOfInterest((r[0] + r[2]) // 2, (r[1] + r[3]) // 2, size=abs(r[0] - r[2])) for r in rects]
    return []


def image_corner_points(im, settings):
    grayscale = im.convert("L")

    # naive attempt at preventing focal points from collecting at watermarks near the bottom
    gd = ImageDraw.Draw(grayscale)
    gd.rectangle([0, im.height * .9, im.width, im.height], fill="#999")

    np_im = np.array(grayscale)

    points = cv2.goodFeaturesToTrack(
        np_im,
        maxCorners=100,
        qualityLevel=0.04,
        minDistance=min(grayscale.width, grayscale.height) * 0.07,
        useHarrisDetector=False,
    )

    if points is None:
        return []

    focal_points = []
    for point in points:
        x, y = point.ravel()
        focal_points.append(PointOfInterest(x, y, size=4))

    return focal_points


def image_entropy_points(im, settings):
    landscape = im.height < im.width
    portrait = im.height > im.width
    if landscape:
        move_idx = [0, 2]
        move_max = im.size[0]
    elif portrait:
        move_idx = [1, 3]
        move_max = im.size[1]
    else:
        return []

    e_max = 0
    crop_current = [0, 0, settings.crop_width, settings.crop_height]
    crop_best = crop_current
    while crop_current[move_idx[1]] < move_max:
        crop = im.crop(tuple(crop_current))
        e = image_entropy(crop)

        if (e > e_max):
            e_max = e
            crop_best = list(crop_current)

        crop_current[move_idx[0]] += 4
        crop_current[move_idx[1]] += 4

    x_mid = int(crop_best[0] + settings.crop_width / 2)
    y_mid = int(crop_best[1] + settings.crop_height / 2)

    return [PointOfInterest(x_mid, y_mid, size=25)]


def image_entropy(im):
    # greyscale image entropy
    # band = np.asarray(im.convert("L"))
    band = np.asarray(im.convert("1"), dtype=np.uint8)
    hist, _ = np.histogram(band, bins=range(0, 256))
    hist = hist[hist > 0]
    return -np.log2(hist / hist.sum()).sum()


def poi_average(pois, settings):
    weight = 0.0
    x = 0.0
    y = 0.0
    for poi in pois:
        weight += poi.weight
        x += poi.x * poi.weight
        y += poi.y * poi.weight
    avg_x = round(x / weight)
    avg_y = round(y / weight)

    return PointOfInterest(avg_x, avg_y)
def is_landscape(w, h):
    return w > h


def is_portrait(w, h):
    return h > w


def is_square(w, h):
    return w == h


class PointOfInterest:
    def __init__(self, x, y, weight=1.0, size=10):
        self.x = x
        self.y = y
        self.weight = weight
        self.size = size

    def bounding(self, size):
        return [
            self.x - size // 2,
            self.y - size // 2,
            self.x + size // 2,
            self.y + size // 2
        ]


class Settings:
    def __init__(self, crop_width=512, crop_height=512, corner_points_weight=0.5, entropy_points_weight=0.5, face_points_weight=0.5, annotate_image=False):
        self.crop_width = crop_width
        self.crop_height = crop_height
        self.corner_points_weight = corner_points_weight
        self.entropy_points_weight = entropy_points_weight
        self.face_points_weight = face_points_weight
        self.annotate_image = annotate_image
        self.destop_view_image = False
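
With this change, crop_image first scales the constraining side of the input to match the requested crop, then searches for a focal point and crops around it, which is what makes non-square targets work. Below is a minimal usage sketch, not part of the commit: the import path autocrop, the file names, and the 512x768 target size are hypothetical examples chosen for illustration.

from PIL import Image

from autocrop import Settings, crop_image  # assumed import path; adjust to wherever this module lives

# Non-square target crop: 512 wide by 768 tall.
settings = Settings(crop_width=512, crop_height=768)

# Force three channels so the grayscale conversion in image_face_points works.
im = Image.open("photo.png").convert("RGB")

cropped = crop_image(im, settings)
print(cropped.size)  # the crop box is always crop_width x crop_height, here (512, 768)
cropped.save("photo_cropped.png")

For a landscape input and this portrait target, crop_image scales by crop_height / im.height; for a portrait input it scales by crop_width / im.width; it then crops a 512x768 window centered as close to the detected focal point as the frame allows.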