<?xml version="1.0" encoding="UTF-8"?>
<feed xmlns="http://www.w3.org/2005/Atom" xml:lang="en">
	<id>https://entorb.net//wiki/index.php?action=history&amp;feed=atom&amp;title=Python_-_CV2</id>
	<title>Python - CV2 - Revision history</title>
	<link rel="self" type="application/atom+xml" href="https://entorb.net//wiki/index.php?action=history&amp;feed=atom&amp;title=Python_-_CV2"/>
	<link rel="alternate" type="text/html" href="https://entorb.net//wiki/index.php?title=Python_-_CV2&amp;action=history"/>
	<updated>2026-05-06T10:27:23Z</updated>
	<subtitle>Revision history for this page on the wiki</subtitle>
	<generator>MediaWiki 1.43.1</generator>
	<entry>
		<id>https://entorb.net//wiki/index.php?title=Python_-_CV2&amp;diff=4839&amp;oldid=prev</id>
		<title>Torben at 20:20, 30 October 2024</title>
		<link rel="alternate" type="text/html" href="https://entorb.net//wiki/index.php?title=Python_-_CV2&amp;diff=4839&amp;oldid=prev"/>
		<updated>2024-10-30T20:20:55Z</updated>

		<summary type="html">&lt;p&gt;&lt;/p&gt;
&lt;p&gt;&lt;b&gt;New page&lt;/b&gt;&lt;/p&gt;&lt;div&gt;[[Category:Coding]][[Category:Python]]&lt;br /&gt;
Image Recognition Using CV2 / OpenCV&lt;br /&gt;
===Basics===&lt;br /&gt;
 import cv2  # c:\Python\Scripts\pip install opencv-python&lt;br /&gt;
 &lt;br /&gt;
 # Read Template Image&lt;br /&gt;
 img_template = cv2.imread(&amp;#039;templates/template.png&amp;#039;)&lt;br /&gt;
 # Show Image&lt;br /&gt;
 cv2.imshow(&amp;quot;Template &amp;quot;, img_template ) ; cv2.waitKey(0)&lt;br /&gt;
&lt;br /&gt;
====Modify Images====&lt;br /&gt;
 # convert image to grayscale&lt;br /&gt;
 img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)&lt;br /&gt;
 # detect edges &lt;br /&gt;
 img_edged = cv2.Canny(img_gray, 50, 200)&lt;br /&gt;
&lt;br /&gt;
===Use Screenshots=== &lt;br /&gt;
 def takeScreenshot():&lt;br /&gt;
    import pyautogui # c:\Python\Scripts\pip install pyautogui&lt;br /&gt;
    # ...&lt;br /&gt;
    # Attention: supports only screenshots of monitor#1&lt;br /&gt;
    screenshot = pyautogui.screenshot()&lt;br /&gt;
    # screenshot = pyautogui.screenshot(region=(screenshotX,screenshotY, screenshotW, screenshotH))&lt;br /&gt;
    # Convert to numpy array&lt;br /&gt;
    screenshot = np.array(screenshot) &lt;br /&gt;
    # Convert RGB to BGR &lt;br /&gt;
    screenshot = screenshot[:, :, ::-1].copy()&lt;br /&gt;
    return screenshot&lt;br /&gt;
&lt;br /&gt;
===Match Images===&lt;br /&gt;
 # often gray scaled images are used for faster processing&lt;br /&gt;
 # many people use edge representation instead of original images&lt;br /&gt;
 # convert image to grayscale&lt;br /&gt;
 img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)&lt;br /&gt;
 # detect edges &lt;br /&gt;
 img_edged = cv2.Canny(img_gray, 50, 200)&lt;br /&gt;
 def matchImages(img_edged, template_edged, threshold = 0.8) :&lt;br /&gt;
    x = -1&lt;br /&gt;
    y = -1&lt;br /&gt;
    # result = cv2.matchTemplate(screenshotGray, template1, cv2.TM_CCOEFF)&lt;br /&gt;
    result = cv2.matchTemplate(screenshot, template, cv2.TM_CCOEFF_NORMED)&lt;br /&gt;
    (_, maxVal, _, maxLoc) = cv2.minMaxLoc(result)  &lt;br /&gt;
 &lt;br /&gt;
    &lt;br /&gt;
    loc = np.where( result &amp;gt;= threshold)&lt;br /&gt;
    &lt;br /&gt;
    # Size of template&lt;br /&gt;
    (tH, tW) = template.shape[:2]&lt;br /&gt;
    &lt;br /&gt;
    # draw a bounding box around the detected region&lt;br /&gt;
    clone = np.dstack([screenshot, screenshot, screenshot])&lt;br /&gt;
    numHits = 0&lt;br /&gt;
    for pt in zip(*loc[::-1]):&lt;br /&gt;
        numHits += 1&lt;br /&gt;
        # print (pt[0] , &amp;quot; , &amp;quot;, pt[1] )&lt;br /&gt;
        if (numHits == 1):&lt;br /&gt;
            # return coordinates of 1st match&lt;br /&gt;
            x = int(pt[0] + tW/2)&lt;br /&gt;
            y = int(pt[1] + tH/2)&lt;br /&gt;
        cv2.rectangle(clone, pt, (pt[0] + tW, pt[1] + tH), (0,0,255), 2)&lt;br /&gt;
    &lt;br /&gt;
    if (numHits &amp;gt; 0):&lt;br /&gt;
        print (str(numHits) + &amp;quot; Treffer&amp;quot;)&lt;br /&gt;
    cv2.imshow(str(numHits) + &amp;quot; Treffer&amp;quot;, clone) ; cv2.waitKey(0)    &lt;br /&gt;
    return x,y&lt;br /&gt;
&lt;br /&gt;
===Match Images Exactly===&lt;br /&gt;
(using numpy, not CV2)&lt;br /&gt;
 def matchImagesExact(im, tpl):&lt;br /&gt;
 # https://stackoverflow.com/questions/29663764/determine-if-an-image-exists-within-a-larger-image-and-if-so-find-it-using-py&lt;br /&gt;
    im = np.atleast_3d(im)&lt;br /&gt;
    tpl = np.atleast_3d(tpl)&lt;br /&gt;
    H, W, D = im.shape[:3]&lt;br /&gt;
    h, w = tpl.shape[:2]&lt;br /&gt;
 &lt;br /&gt;
    # Integral image and template sum per channel&lt;br /&gt;
    sat = im.cumsum(1).cumsum(0)&lt;br /&gt;
    tplsum = np.array([tpl[:, :, i].sum() for i in range(D)])&lt;br /&gt;
 &lt;br /&gt;
    # Calculate lookup table for all the possible windows&lt;br /&gt;
    iA, iB, iC, iD = sat[:-h, :-w], sat[:-h, w:], sat[h:, :-w], sat[h:, w:] &lt;br /&gt;
    lookup = iD - iB - iC + iA&lt;br /&gt;
    # Possible matches&lt;br /&gt;
    possible_match = np.where(np.logical_and.reduce([lookup[..., i] == tplsum[i] for i in range(D)]))&lt;br /&gt;
 &lt;br /&gt;
    # Find exact match&lt;br /&gt;
    for y, x in zip(*possible_match):&lt;br /&gt;
        if np.all(im[y+1:y+h+1, x+1:x+w+1] == tpl):&lt;br /&gt;
            return (x+1+w/2 , y+1+h/2)&lt;br /&gt;
    return -1,-1&lt;br /&gt;
&lt;br /&gt;
===Color Filter===&lt;br /&gt;
 # Convert RGB/BGR to HSV representation of image&lt;br /&gt;
 # https://docs.opencv.org/3.2.0/df/d9d/tutorial_py_colorspaces.html&lt;br /&gt;
 green = np.uint8([[[102,255,102 ]]]) # BGR obtained from Gimp&lt;br /&gt;
 hsv_green = cv2.cvtColor(green,cv2.COLOR_BGR2HSV)&lt;br /&gt;
 # print (hsv_green)&lt;br /&gt;
 # 102,255,102 -&amp;gt; 60 153 255&lt;br /&gt;
 # Now you take [H-10, 100,100] and [H+10, 255, 255] as lower bound and upper bound respectively.&lt;br /&gt;
 lower_green = np.array([50,100,100])&lt;br /&gt;
 upper_green = np.array([70,255,255])&lt;br /&gt;
 &lt;br /&gt;
 # Load Image&lt;br /&gt;
 image = cv2.imread(&amp;#039;image.png&amp;#039;)&lt;br /&gt;
 # Convert BGR to HSV&lt;br /&gt;
 img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)&lt;br /&gt;
 # Threshold the HSV image to get only green colors&lt;br /&gt;
 img_mask = cv2.inRange(img_hsv, lower_green, upper_green)&lt;br /&gt;
 # Bitwise-AND mask and original image&lt;br /&gt;
 img_res = cv2.bitwise_and(image,image, mask=img_mask)&lt;br /&gt;
 # invert Mask&lt;br /&gt;
 img_mask_inv = cv2.bitwise_not(img_mask)&lt;br /&gt;
 &lt;br /&gt;
 # Show results&lt;br /&gt;
 cv2.imshow(&amp;#039;image&amp;#039;,image)&lt;br /&gt;
 cv2.imshow(&amp;#039;mask&amp;#039;,img_mask)&lt;br /&gt;
 cv2.imshow(&amp;#039;img_mask_inv&amp;#039;,img_mask_inv)&lt;br /&gt;
 cv2.imshow(&amp;#039;res&amp;#039;,img_res)&lt;br /&gt;
 cv2.waitKey(0)&lt;/div&gt;</summary>
		<author><name>Torben</name></author>
	</entry>
</feed>