summaryrefslogtreecommitdiffstats
path: root/FocusStack.py
diff options
context:
space:
mode:
authorCharles McGuinness <charles@mcguinness.us>2015-12-14 13:00:50 -0500
committerCharles McGuinness <charles@mcguinness.us>2015-12-14 13:00:50 -0500
commit8ab0a37794cd131d4baef5f6ceecffc568947505 (patch)
tree0ab8d3123e37f7c3e2c8261e0c53b17b55766367 /FocusStack.py
parentefee783cb87fe2015ab719699e80a661aa3b4d4b (diff)
downloadfocusstack-8ab0a37794cd131d4baef5f6ceecffc568947505.zip
focusstack-8ab0a37794cd131d4baef5f6ceecffc568947505.tar.gz
focusstack-8ab0a37794cd131d4baef5f6ceecffc568947505.tar.bz2
Create FocusStack.py
Diffstat (limited to 'FocusStack.py')
-rw-r--r--FocusStack.py135
1 files changed, 135 insertions, 0 deletions
diff --git a/FocusStack.py b/FocusStack.py
new file mode 100644
index 0000000..d33f36d
--- /dev/null
+++ b/FocusStack.py
@@ -0,0 +1,135 @@
+"""
+
+Simple Focus Stacker
+
+ Author: Charles McGuinness (charles@mcguinness.us)
+ Copyright: Copyright 2015 Charles McGuinness
+ License: Apache License 2.0
+
+
+This code will take a series of images and merge them so that each
+pixel is taken from the image with the sharpest focus at that location.
+
+The logic is roughly the following:
+
+1. Scale and align the images. Changing the focus on a lens, even
+ if the camera remains fixed, causes a mild zooming on the images.
+ We need to correct the images so they line up perfectly on top
+ of each other.
+
+2. Perform a gaussian blur on all images
+
+3. Compute the laplacian on the blurred image to generate a gradient map
+
+4. Create a blank output image with the same size as the original input
+ images
+
+5. For each pixel [x,y] in the output image, copy the pixel [x,y] from
+   the input image which has the largest gradient [x,y]
+
+
+This algorithm was inspired by the high-level description given at
+
+http://stackoverflow.com/questions/15911783/what-are-some-common-focus-stacking-algorithms
+
+"""
+
+import numpy as np
+import cv2
+
def findHomography(image_1_kp, image_2_kp, matches):
    """Estimate the homography mapping image 1 onto image 2.

    Builds the two (N, 1, 2) float32 point arrays that cv2.findHomography
    expects from the matched keypoint pairs, then fits the transform with
    RANSAC (reprojection threshold of 2 pixels) to reject outlier matches.
    Returns the 3x3 homography matrix.
    """
    pts_1 = np.float32([image_1_kp[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
    pts_2 = np.float32([image_2_kp[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)

    homography, _mask = cv2.findHomography(pts_1, pts_2, cv2.RANSAC,
                                           ransacReprojThreshold=2.0)
    return homography
+
+
#
# Align the images so they overlap properly...
#
#
def align_images(images):
    """Register every image against images[0], the reference frame.

    Changing focus zooms the frame slightly, so each subsequent image is
    warped by a RANSAC-estimated homography onto the reference. Returns a
    new list: the reference unchanged, followed by the warped frames.
    """
    use_sift = False

    outimages = []

    if use_sift:
        detector = cv2.SIFT()
    else:
        detector = cv2.ORB(1000)

    # We assume that image 0 is the "base" image and align everything to it
    print("Detecting features of base image")
    outimages.append(images[0])
    image1gray = cv2.cvtColor(images[0], cv2.COLOR_BGR2GRAY)
    image_1_kp, image_1_desc = detector.detectAndCompute(image1gray, None)

    for i in range(1, len(images)):
        print("Aligning image {}".format(i))
        # BUGFIX: the base image had its features detected on a grayscale
        # conversion, but the remaining images were passed to the detector
        # in color. Convert every frame to grayscale so descriptors are
        # computed consistently with the reference.
        image_i_gray = cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)
        image_i_kp, image_i_desc = detector.detectAndCompute(image_i_gray, None)

        if use_sift:
            bf = cv2.BFMatcher()
            # knnMatch returns the top two matches per feature point;
            # keep a match only when it clearly beats the runner-up
            # (Lowe's ratio test).
            pairMatches = bf.knnMatch(image_i_desc, image_1_desc, k=2)
            rawMatches = []
            for m, n in pairMatches:
                if m.distance < 0.7 * n.distance:
                    rawMatches.append(m)
        else:
            bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
            rawMatches = bf.match(image_i_desc, image_1_desc)

        # Keep only the 128 best matches for the homography fit.
        sortMatches = sorted(rawMatches, key=lambda x: x.distance)
        matches = sortMatches[0:128]

        hom = findHomography(image_i_kp, image_1_kp, matches)
        newimage = cv2.warpPerspective(images[i], hom,
                                       (images[i].shape[1], images[i].shape[0]),
                                       flags=cv2.INTER_LINEAR)

        outimages.append(newimage)

    return outimages
+
#
# Gaussian-blur the image, then take its Laplacian as a focus measure
def doLap(image):
    """Return the Laplacian of a Gaussian-smoothed copy of *image*.

    The blur suppresses noise; the Laplacian responds strongly at sharp
    edges, so its magnitude serves as a per-pixel focus measure.
    """
    ksize = 9  # YOU SHOULD TUNE THIS VALUE TO SUIT YOUR NEEDS
    smoothed = cv2.GaussianBlur(image, (ksize, ksize), 0)
    return cv2.Laplacian(smoothed, cv2.CV_64F, ksize=ksize)
+
#
# This routine finds the points of best focus in all images and produces a merged result...
#
def focus_stack(unimages):
    """Merge a focus bracket into a single all-in-focus image.

    The frames are first aligned to the first image; each output pixel is
    then copied from whichever input frame has the largest absolute
    Laplacian response -- i.e. the sharpest focus -- at that location.
    Returns the merged image with the same shape/dtype as the inputs.
    """
    images = align_images(unimages)

    print("Computing the laplacian of the blurred images")
    laps = []
    for i in range(len(images)):
        print("Lap {}".format(i))
        laps.append(doLap(cv2.cvtColor(images[i], cv2.COLOR_BGR2GRAY)))

    laps = np.asarray(laps)
    print("Shape of array of laplacians = {}".format(laps.shape))

    # PERF: the original per-pixel Python double loop ran an interpreted
    # np.where(...) for every (y, x). A single argmax over the image axis
    # selects the sharpest frame for all pixels at once (argmax keeps the
    # first maximum, matching the old np.where(...)[0][0] tie-breaking).
    best = np.argmax(np.absolute(laps), axis=0)          # (H, W) frame indices
    ys, xs = np.indices(best.shape)
    return np.asarray(images)[best, ys, xs]
+