wilmer@veer:~/src/chroma$ cat test.py
#!/usr/bin/env python3

VID = "/dev/v4l/by-id/usb-046d_HD_Pro_Webcam_C920_DDFB217F-video-index0"
BACK = "back.mp4"
RES = (640, 360)  # VGA width, but 16:9
LOOPBACK = "/dev/video6"

import argparse
import subprocess

import numpy as np
import cv2

import pyfakewebcam


parser = argparse.ArgumentParser(description="Chroma key using OpenCV background subtraction")
parser.add_argument("-b", "--background", help="Background image or animation", default=BACK)
parser.add_argument("-c", "--camera", help="Camera device", default=VID)
parser.add_argument("-r", "--resolution", help="Capture resolution", default=RES, nargs=2, type=int, metavar=("W", "H"))
parser.add_argument("-l", "--loopback", help="v4l2loopback device", default=LOOPBACK)
parser.add_argument("-s", "--show", help="Show result in own window", action="store_true")
args = parser.parse_args()
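# Example invocations (editor's illustration; file names other than the defaults are placeholders):
#     ./test.py -s                           # defaults: C920 -> back.mp4 -> /dev/video6
#     ./test.py -b beach.jpg -r 1280 720     # still background, 720p capture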


cap = cv2.VideoCapture(args.camera)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, args.resolution[0])
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, args.resolution[1])
fgbg = cv2.createBackgroundSubtractorMOG2(history=50, detectShadows=False)

# exposure_auto=3 is aperture-priority (automatic) exposure on UVC cameras.
subprocess.Popen(["v4l2-ctl", "-d", args.camera, "-c", "exposure_auto=3"]).communicate()

# Let camera do auto-exposure, and let MOG2 do its learning.
for i in range(25):
        ret, frame = cap.read()
        fgmask = fgbg.apply(frame, learningRate=1)

# exposure_auto=1 is manual mode: lock the exposure so frames keep matching the learned background.
subprocess.Popen(["v4l2-ctl", "-d", args.camera, "-c", "exposure_auto=1"]).communicate()
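# Editor's note (sketch, not in the original script): on newer kernels the UVC
# exposure control may be named "auto_exposure" rather than "exposure_auto".
# A helper like this could try both spellings; the script above does not use it.
def set_exposure_mode(device, value):
        """Try both known control names; return True if one of the calls exits 0."""
        for name in ("exposure_auto", "auto_exposure"):
                proc = subprocess.run(["v4l2-ctl", "-d", device, "-c", f"{name}={value}"],
                                      capture_output=True)
                if proc.returncode == 0:
                        return True
        return False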

# cv2.imread() returns None when the file isn't a readable still image;
# in that case, treat the background as a video clip instead.
back_still = cv2.imread(args.background)
if isinstance(back_still, np.ndarray):
        back = back_still
        back_move = None
else:
        back_move = cv2.VideoCapture(args.background)
        _, back = back_move.read()  # Waste the first frame, sorry!

print(back.shape)
back_res = tuple(back.shape[1::-1])  # (width, height)
back_ratio = back_res[0] / back_res[1]
print(back_ratio)
if back_ratio >= (args.resolution[0] / args.resolution[1]):
        # Background (and therefore the result we'll dump on the loopback) is wider
        # than the input: equal height for both, paste the camera at a non-0 x.
        # "& ~1" rounds the width down to an even number, presumably because the
        # loopback's YUV pixel formats want even dimensions.
        out_res = (int(args.resolution[1] * back_ratio) & ~1, args.resolution[1])
        fg_offset = (int((out_res[0] - args.resolution[0]) / 2), 0)
else:
        # Background is narrower than the input: paste the camera at the bottom, at a non-0 y.
        out_res = (args.resolution[0], int(args.resolution[0] / back_ratio) & ~1)
        fg_offset = (0, out_res[1] - args.resolution[1])
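# Worked example (editor's illustration, not output from the paste): a 1024x768
# background with the default 640x360 capture gives back_ratio = 1024/768 = 1.33,
# which is below 640/360 = 1.78, so the else branch runs:
# out_res = (640, int(640 / back_ratio) & ~1) = (640, 480) and fg_offset = (0, 480 - 360) = (0, 120),
# i.e. the camera frame is pasted along the bottom edge of the background.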

print(back_res)
print(back_ratio)
print(out_res)
print(fg_offset)

if isinstance(back_still, np.ndarray):  # A NumPy array can't be tested with a bare "if", hence isinstance().
        back_still = cv2.resize(back_still, out_res)

outcam = pyfakewebcam.FakeWebcam(args.loopback, *out_res)

# Haar cascade for the (currently disabled) face-detection experiment further down.
face_finder = cv2.CascadeClassifier("face.xml")


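# Editor's note: "face.xml" is assumed to be a frontal-face Haar cascade.  OpenCV
# ships one that could be used instead (sketch, not used by the script):
#
#     import os
#     face_finder = cv2.CascadeClassifier(
#             os.path.join(cv2.data.haarcascades, "haarcascade_frontalface_default.xml"))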
def overlay_image_alpha(img, img_overlay, pos, alpha_mask):
        """Overlay img_overlay on top of img at the position specified by
        pos and blend using alpha_mask.

        Alpha mask must contain values within the range [0, 1] and be the
        same size as img_overlay.
        """

        x, y = pos

        # Image ranges
        y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])
        x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])

        # Overlay ranges
        y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)
        x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)

        # Exit if nothing to do
        if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:
                return

        channels = img.shape[2]

        alpha = alpha_mask[y1o:y2o, x1o:x2o]
        alpha_inv = 1.0 - alpha

        for c in range(channels):
                img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] + alpha_inv * img[y1:y2, x1:x2, c])


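# Editor's sketch (not in the original script): the per-channel loop above can
# be written as a single broadcast over the colour axis.  Equivalent behaviour,
# just vectorised; the script keeps using overlay_image_alpha().
def overlay_image_alpha_vec(img, img_overlay, pos, alpha_mask):
        """Same cropping logic as overlay_image_alpha, blending all channels at once."""
        x, y = pos
        y1, y2 = max(0, y), min(img.shape[0], y + img_overlay.shape[0])
        x1, x2 = max(0, x), min(img.shape[1], x + img_overlay.shape[1])
        y1o, y2o = max(0, -y), min(img_overlay.shape[0], img.shape[0] - y)
        x1o, x2o = max(0, -x), min(img_overlay.shape[1], img.shape[1] - x)
        if y1 >= y2 or x1 >= x2 or y1o >= y2o or x1o >= x2o:
                return
        alpha = alpha_mask[y1o:y2o, x1o:x2o, np.newaxis]
        img[y1:y2, x1:x2] = (alpha * img_overlay[y1o:y2o, x1o:x2o]
                             + (1.0 - alpha) * img[y1:y2, x1:x2])

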
show = "o"  # show Output

while True:
        ret, frame = cap.read()
        if not ret:
                continue  # Skip dropped frames instead of crashing on a None frame.

        fgmask = fgbg.apply(frame, learningRate=0)

        if False:
                # Disabled experiment: force detected face regions into the foreground mask.
                mask = np.zeros(fgmask.shape)

                bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                faces = face_finder.detectMultiScale(bw, 1.1, 4)
                for face in faces:
                        x1, y1 = face[0:2]
                        x2, y2 = (x1 + face[2]), (y1 + face[3])
                        cv2.rectangle(fgmask, (x1, y1), (x2, y2), 1, 0)

                #fgmask = fgmask * mask

        # MOG2 yields a 0/255 mask: scale to [0, 1], feather the edges, then drop weak pixels.
        fgmask = fgmask / 255.0

        fgmask = cv2.GaussianBlur(fgmask, (11, 11), 0)
        ret, fgmask = cv2.threshold(fgmask, .6, 1, cv2.THRESH_TOZERO)

        if back_move is not None:
                # Pull the next frame of the background clip, rewinding by reopening it at EOF.
                ret = False
                while not ret:
                        ret, back = back_move.read()
                        if not ret:
                                back_move = cv2.VideoCapture(args.background)
                res = cv2.resize(back, out_res)
        else:
                res = np.copy(back_still)

        overlay_image_alpha(res, frame, fg_offset, fgmask)

        # pyfakewebcam expects RGB frames; OpenCV works in BGR.
        outcam.schedule_frame(cv2.cvtColor(res, cv2.COLOR_BGR2RGB))
        if args.show:
                # Show it flipped just for our own view.
                if show == "m":
                        cv2.imshow("chroma", cv2.flip(fgmask, 1))
                else:
                        cv2.imshow("chroma", cv2.flip(res, 1))

        try:
                key = chr(cv2.waitKey(1))
        except ValueError:
                key = "\0"  # waitKey() returns -1 when no key was pressed; chr(-1) raises ValueError.

        if key == "q":
                break
        elif key in "mo":
                show = key  # "m" previews the mask, "o" the composited output.


cap.release()
cv2.destroyAllWindows()

# Restore what I assume is the default..
subprocess.Popen(["v4l2-ctl", "-d", args.camera, "-c", "exposure_auto=3"]).communicate()

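# Editor's note: a minimal sketch to verify the loopback end of the pipeline on
# its own (assumptions: v4l2loopback already provides /dev/video6 and
# pyfakewebcam is installed), kept as a comment so it never runs here:
#
#     import time
#     import numpy as np
#     import pyfakewebcam
#
#     cam = pyfakewebcam.FakeWebcam("/dev/video6", 640, 360)
#     frame = np.zeros((360, 640, 3), dtype=np.uint8)
#     frame[:, :, 0] = 255                  # solid red; pyfakewebcam expects RGB
#     while True:
#             cam.schedule_frame(frame)
#             time.sleep(1 / 30)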
