# Detects armor color by looking only at the LED contour edges: the LED color
# is most pronounced at the edges, so we average the hue along the detected
# contours and pick whichever of the red/blue reference hues is closer.
class ContourEdgeDetector:
    # HSV hue for RED
    RED_THRESHOLD = 8
    # HSV hue for BLUE (value carried over from the Canny/HSV detector this
    # class replaces).
    # NOTE(review): OpenCV 8-bit HSV hue spans 0-179, so 230 is out of range
    # (blue is ~120) — confirm the intended scale before relying on it.
    BLUE_THRESHOLD = 230

    def __init__(self):
        pass

    def detect_color(self, image, gaussian_blur=False):
        """Classify a BGR armor image as Colors.RED / Colors.BLUE / Colors.GREY.

        image         -- BGR image (numpy array) of the armor plate
        gaussian_blur -- optionally denoise before contour detection
        """
        if gaussian_blur:
            # blur reduces sensor noise before thresholding / contour search
            image = cv.GaussianBlur(image, (5, 5), 0)

        contours = GreyBlobDetector().detect_color(image)
        if contours is None:
            # no lit LED contour found -> LEDs are off
            return Colors.GREY

        # Rasterize the contours into a single-channel mask.
        # BUG FIX: np.zeros(image.shape[0], image.shape[1]) passes the width as
        # the dtype argument and raises TypeError — the shape must be a single
        # tuple; drawContours also wants a uint8 canvas.
        mask = np.zeros((image.shape[0], image.shape[1]), dtype=np.uint8)
        cv.drawContours(mask, contours, -1, (255,), 1)

        # Keep ONLY the edge pixels.  BUG FIX: the original blanked the pixels
        # *under* the mask (image[mask != 0] = 0), i.e. it erased the edges it
        # had just found, contradicting the "use the edges as a mask" intent.
        # Work on a copy so the caller's image is not mutated.
        image = image.copy()
        image[mask == 0] = (0, 0, 0)

        # Transform to HSV; (0,0,0) BGR stays hue 0, so blanked pixels are
        # excluded from the average below.
        hsv = cv.cvtColor(image, cv.COLOR_BGR2HSV)
        hues = hsv[:, :, 0]
        nonzero = (hues != 0).sum()
        if nonzero == 0:
            # every edge pixel had hue 0 — nothing usable to classify
            return Colors.GREY
        average = hues.sum() / nonzero

        if abs(average - ContourEdgeDetector.RED_THRESHOLD) \
                < abs(average - ContourEdgeDetector.BLUE_THRESHOLD):
            return Colors.RED
        return Colors.BLUE
PI = 3.141592653589793238  # more digits than a double keeps; equal to math.pi

class Colors:
    """String labels returned by the armor-color detectors."""
    RED = "RED"
    # NOTE(review): only RED is visible in this chunk; BLUE and GREY are
    # referenced elsewhere (Colors.BLUE / Colors.GREY) and presumably follow
    # the same string-label pattern — confirm against the original file.
    BLUE = "BLUE"
    GREY = "GREY"


class GreyBlobDetector:
    """Finds the contours of the lit armor LEDs in an image.

    NOTE(review): the values of WHITE_THRESHOLD and CIRCLE_THRESHOLD, and the
    body of calculate_min_area, live in parts of the original file not visible
    in this chunk — they are referenced below unchanged and must be preserved
    from the original source.
    """
    # threshold on contour_area / convex_hull_area (1.0 = perfectly convex)
    CONVEX_THRESHOLD = 0.99
    # threshold for how elongated it is (called inertia in opencv), TBD (circle = 1)
    INERTIA_THRESHOLD = 0.2
    # unless a robot got flipped, the LED bars should be pretty vertical
    ANGLE_THRESHOLD = 30

    # expected geometry of the armor image relative to the LED bars
    WIDTH_RATIO = 0.2       # width LED / width image, TBD
    HEIGHT_RATIO = 0.67     # height LED / height image, TBD
    DIST_LEDS_RATIO = 0.67  # distance of the 2 LEDs / width of image, TBD

    def __init__(self):
        pass

    def calculate_max_area(self, image):
        """Upper bound on a plausible LED-blob area, from the image size.

        image.shape is (rows, cols) == (height, width); each dimension is now
        paired with its own ratio.  (The original paired them the other way
        around — the product is identical because multiplication commutes, so
        behavior is unchanged; this just reads correctly.)
        """
        height, width = image.shape
        return (width * GreyBlobDetector.WIDTH_RATIO) \
            * (height * GreyBlobDetector.HEIGHT_RATIO)

    def calculate_min_area(self, image):
        # needed here because part of the vertical bars may be hidden
        # NOTE(review): the body of this method lies in diff context that is
        # not visible in this chunk — restore it from the original file.
        raise NotImplementedError("body hidden in source chunk — restore from original")

    def calculate_distance(self, image):
        """Expected distance between the two LED bars (use with >= 2 blobs)."""
        _, width = image.shape
        return width * GreyBlobDetector.DIST_LEDS_RATIO  # TBD

    def detect_color(self, image, area=False, distance=False, already_grayscale=False):
        """Return the list of contours that look like lit LED bars, or None.

        image             -- input image (BGR, or single-channel if
                             already_grayscale is True)
        area / distance   -- toggles currently unused by the visible code
        already_grayscale -- skip the BGR->grayscale conversion

        Returns None when no contour passes the shape filters (all LEDs off);
        the color detectors translate that into Colors.GREY.
        """
        # first go to grayscale, then to black & white with a threshold.
        # BUG FIX: the visible code converted unconditionally, crashing on the
        # single-channel input that __main__ feeds with already_grayscale=True.
        if already_grayscale:
            grey = image
        else:
            grey = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
        black_white = cv.threshold(grey, GreyBlobDetector.WHITE_THRESHOLD, 255,
                                   cv.THRESH_BINARY)[1]

        # note: other versions of OpenCV return (image, contours, hierarchy)
        contours, hierarchy = cv.findContours(black_white, cv.RETR_TREE,
                                              cv.CHAIN_APPROX_SIMPLE)

        retained_contours = []
        for contour in contours:
            # renamed from `area`, which shadowed the parameter of the same name
            contour_area = cv.contourArea(contour)

            # BUG FIX: circularity = 4*pi*A / perimeter^2.  The original used
            # len(contour)**2, but with CHAIN_APPROX_SIMPLE len(contour) is the
            # number of stored vertices, not the perimeter — use cv.arcLength.
            perimeter = cv.arcLength(contour, True)
            if perimeter == 0:
                continue  # degenerate contour, avoid division by zero
            circularity = 4 * PI * contour_area / (perimeter * perimeter)

            hull_area = cv.contourArea(cv.convexHull(contour))
            if hull_area == 0:
                continue  # degenerate contour, avoid division by zero
            convexity = contour_area / hull_area

            # minAreaRect gives ((cx, cy), (w, h), angle); the aspect ratio
            # stands in for OpenCV's "inertia"
            (x, y), (width, height), theta = cv.minAreaRect(contour)
            if height == 0:
                continue  # degenerate rect, avoid division by zero
            inertia = width / height

            # NOTE(review): minAreaRect's angle convention differs across
            # OpenCV versions ([-90, 0) vs [0, 90)); `theta < ANGLE_THRESHOLD`
            # may need abs()/normalization — confirm against the cv2 in use.
            if circularity < GreyBlobDetector.CIRCLE_THRESHOLD and \
                    convexity > GreyBlobDetector.CONVEX_THRESHOLD and \
                    inertia < GreyBlobDetector.INERTIA_THRESHOLD and \
                    theta < GreyBlobDetector.ANGLE_THRESHOLD:
                retained_contours.append(contour)

        if not retained_contours:
            # no contour retained -> all LEDs are off; callers return GREY
            return None
        # these are the contours that supposedly represent the LEDs
        return retained_contours


if __name__ == "__main__":
    # smoke test: run the detector on a sample image and show what it kept
    image = cv.imread("test.png")
    detector = GreyBlobDetector()
    print(detector.detect_color(image))