wilmer@veer:~/src/chroma$ cat test.py
#!/usr/bin/env python3
VID = "/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920_DDFB217F-video-index0"
BACK = "back.mp4"
RES = (640, 360)  # VGA width, but 16:9.
LOOPBACK = "/dev/video6"
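# VID is a /dev/v4l/by-id path so the same physical camera is picked up
# regardless of device enumeration order. LOOPBACK must be an existing
# v4l2loopback device (see the modprobe note at the end of this post).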
import argparse
import subprocess
import numpy as np
import cv2
import pyfakewebcam

parser = argparse.ArgumentParser(description="Chroma key using OpenCV background subtraction")
parser.add_argument("-b", "--background", help="Background image or animation", default=BACK)
parser.add_argument("-c", "--camera", help="Camera device", default=VID)
parser.add_argument("-r", "--resolution", help="Capture resolution", default=RES, nargs=2, type=int, metavar=("W", "H"))
parser.add_argument("-l", "--loopback", help="v4l2loopback device", default=LOOPBACK)
parser.add_argument("-s", "--show", help="Show result in own window", action="store_true")
args = parser.parse_args()
cap = cv2.VideoCapture(args.camera)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.resolution[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.resolution[1])
fgbg = cv2.createBackgroundSubtractorMOG2(history=50, detectShadows=False)
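# The trick behind the "chroma key": during the warm-up below, MOG2 learns
# what the scene looks like without you in it (learningRate=1 rebuilds the
# model from each frame); the main loop later applies it with learningRate=0,
# so whatever differs from the learned background becomes the foreground mask.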
# UVC exposure_auto values: 3 = aperture-priority auto, 1 = manual
# (at least on this C920).
subprocess.run(["v4l2-ctl", "-d", args.camera, "-c", "exposure_auto=3"])
# Let camera do auto-exposure, and let MOG2 do its learning.
for i in range(25):
    ret, frame = cap.read()
    fgmask = fgbg.apply(frame, learningRate=1)
# Lock the exposure so the learned background keeps matching reality.
subprocess.run(["v4l2-ctl", "-d", args.camera, "-c", "exposure_auto=1"])
back_still = cv2.imread(args.background)
if isinstance(back_still, np.ndarray):
    back = back_still
    back_move = None
else:
    # Not a still image; treat it as a video file instead.
    back_move = cv2.VideoCapture(args.background)
    _, back = back_move.read()  # Waste the first frame, sorry!
print(back.shape)
back_res = tuple(back.shape[1::-1])  # numpy shape is (h, w, c); we want (w, h).
back_ratio = back_res[0] / back_res[1]
print(back_ratio)
if back_ratio >= (args.resolution[0] / args.resolution[1]):
    # Background (and therefore the result we'll dump on the loopback) is
    # wider than the input: equal height for both, paste the camera frame
    # at a non-zero x offset.
    out_res = (int(args.resolution[1] * back_ratio) & ~1, args.resolution[1])
    fg_offset = ((out_res[0] - args.resolution[0]) // 2, 0)
else:
    # Background is narrower than the input: paste the camera frame at the
    # bottom, at a non-zero y offset.
    out_res = (args.resolution[0], int(args.resolution[0] / back_ratio) & ~1)
    fg_offset = (0, out_res[1] - args.resolution[1])
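# The "& ~1" above rounds the computed dimension down to an even number;
# odd widths/heights tend to upset YUV-based consumers of the loopback.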
print(back_res)
print(back_ratio)
print(out_res)
print(fg_offset)
if isinstance(back_still, np.ndarray):  # numpy arrays refuse plain truth-testing.
    back_still = cv2.resize(back_still, out_res)
outcam = pyfakewebcam.FakeWebcam(args.loopback, *out_res)
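# FakeWebcam takes (device, width, height), and out_res is already (w, h),
# hence the splat.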
# Haar cascade, only used by the disabled face-detection experiment below.
face_finder = cv2.CascadeClassifier("face.xml")
def overlay_image_alpha(img, img_overlay, pos, alpha_mask):
    """Overlay img_overlay on top of img at the position specified by
    pos and blend using alpha_mask.

    Alpha mask must contain values within the range [0, 1] and be the
    same size as img_overlay.
    """
    x, y = pos

    # Image ranges
    y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])
    x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])

    # Overlay ranges
    y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)
    x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)

    # Exit if nothing to do
    if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:
        return

    channels = img.shape[2]
    alpha = alpha_mask[y1o:y2o, x1o:x2o]
    alpha_inv = 1.0 - alpha
    for c in range(channels):
        img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c]
                                + alpha_inv * img[y1:y2, x1:x2, c])
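# Hypothetical usage, blending an overlay in at 50% opacity, 16 pixels
# from the left edge:
#   overlay_image_alpha(dest, overlay, (16, 0), np.full(overlay.shape[:2], 0.5))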
show = "o"  # What to show in our own window: "o" = output, "m" = mask.
while True:
    ret, frame = cap.read()
    # learningRate=0: classify against the background learned during
    # warm-up, without updating the model any further.
    fgmask = fgbg.apply(frame, learningRate=0)
    if False:  # Disabled experiment: force detected faces into the foreground mask.
        bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_finder.detectMultiScale(bw, 1.1, 4)
        for face in faces:
            x1, y1 = face[0:2]
            x2, y2 = (x1 + face[2]), (y1 + face[3])
            # Filled 255 rectangle, so faces survive the normalisation below.
            cv2.rectangle(fgmask, (x1, y1), (x2, y2), 255, -1)
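    # MOG2 (with shadow detection off) returns a 0/255 mask. Normalise it
    # to [0, 1], feather the edges with a blur, then zero out anything
    # below 0.6 so weak noise disappears but the soft edge survives.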
    fgmask = fgmask / 255.0
    fgmask = cv2.GaussianBlur(fgmask, (11, 11), 0)
    ret, fgmask = cv2.threshold(fgmask, 0.6, 1, cv2.THRESH_TOZERO)
    if back_move is not None:
        ret = False
        while not ret:
            ret, back = back_move.read()
            if not ret:
                # End of the clip: reopen it so the background loops.
                back_move = cv2.VideoCapture(args.background)
        res = cv2.resize(back, out_res)
    else:
        res = np.copy(back_still)
    overlay_image_alpha(res, frame, fg_offset, fgmask)
    # pyfakewebcam wants RGB frames while OpenCV works in BGR; convert so
    # the colours don't come out swapped.
    outcam.schedule_frame(cv2.cvtColor(res, cv2.COLOR_BGR2RGB))
    if args.show:
        # Show it flipped, just for our own (mirror-image) view.
        if show == "m":
            cv2.imshow("chroma", cv2.flip(fgmask, 1))
        else:
            cv2.imshow("chroma", cv2.flip(res, 1))
    try:
        # waitKey() returns -1 when no key is pending, making chr() raise.
        key = chr(cv2.waitKey(1))
    except ValueError:
        key = "\0"
    if key == "q":
        break
    elif key in "mo":
        show = key
cap.release()
cv2.destroyAllWindows()
# Restore what I assume is the camera's default (auto-exposure).
subprocess.run(["v4l2-ctl", "-d", args.camera, "-c", "exposure_auto=3"])
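For completeness: the /dev/video6 loopback above comes from v4l2loopback. Assuming the module is installed, something like this should create it (video_nr matching LOOPBACK; exclusive_caps=1 helps browsers accept the device):

wilmer@veer:~/src/chroma$ sudo modprobe v4l2loopback video_nr=6 exclusive_caps=1 card_label="chroma"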