Determine orientation of a product for pick and place
I'm trying to detect the orientation of products, so I can use this orientation for a pick-and-place system. What I have so far:
- I can detect the contour of the product
- I can calculate the center of the contour
- I can calculate the angle by fitting an ellipse over the contour, however the outcome is not stable
The problem is determining the angle, since the products are almost identical mass-wise on the upper and bottom sides. The calculation of the angle by fitting an ellipse is not stable: sometimes the vector points left and sometimes right. As shown in the following picture, you can see that the drawn line of the angle is not always pointing in the same direction.
Does somebody have an idea how I can make sure the calculation of the angle (orientation) is 100% correct?
Attached you can find the sample picture.
Here is my code so far:
import cv2
import numpy as np
import math
# read the image once at startup
# NOTE(review): `cap` is never referenced in the visible code — the loop
# below re-reads the same file every iteration; this looks like dead code,
# but the paste is truncated, so confirm before removing.
cap = cv2.imread("20190909_170137.jpg")
def nothing(x):
    """No-op callback; cv2.createTrackbar requires a callable argument."""
    return None
# create slider window with the two trackbars read inside the main loop
cv2.namedWindow("Trackbars")
# NOTE(review): hh/hl/wnd are never used in the visible code — possibly
# leftovers from an earlier version; the paste is truncated, so confirm.
hh='Max'
hl='Min'
wnd = 'Colorbars'
# "threshold": binary threshold level (0-255, initial 150)
cv2.createTrackbar("threshold", "Trackbars", 150, 255, nothing)
# "Houghlines": read into u_v below but never applied in the visible code
cv2.createTrackbar("Houghlines", "Trackbars", 255, 255, nothing)
# Main processing loop: re-reads and re-processes the source image every
# iteration so trackbar changes take effect live.
# NOTE(review): the loop tail (cv2.imshow / cv2.waitKey / exit condition)
# is not visible — the paste is truncated at "#Detect Hull ...".
# Indentation below is reconstructed from a flat paste — verify nesting.
while True:
    frame = cv2.imread("20190909_170137.jpg", cv2.IMREAD_COLOR)
    scale_percent = 60 # percent of original size
    width = int(frame.shape[1] * scale_percent / 100)
    height = int(frame.shape[0] * scale_percent / 100)
    dim = (width, height)
    # resize image
    frame = cv2.resize(frame, dim, interpolation = cv2.INTER_AREA)
    # read current slider positions
    l_v = cv2.getTrackbarPos("threshold", "Trackbars")   # binary threshold level
    u_v = cv2.getTrackbarPos("Houghlines", "Trackbars")  # read but unused below
    #convert frame to grayscale
    bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    #convert grayscale to binary image
    ret,thresh4 = cv2.threshold(bw,l_v,255,cv2.THRESH_BINARY)
    #find the contours in thresh4
    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4
    # returns only (contours, hierarchy) — confirm the installed version.
    im2, contours, hierarchy = cv2.findContours(thresh4, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
    #process each detected contour
    for contour in contours:
        #calculate area and moments of each contour
        area = cv2.contourArea(contour)
        M = cv2.moments(contour)
        if M["m00"] > 0:
            # centroid of the contour from its spatial moments
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])
            #use contour only if its area is between 1000 and 50000
            if area > 1000:
                if area <50000:
                    approx = cv2.approxPolyDP(contour, 0.001*cv2.arcLength(contour, True), True)
                    #draw contour
                    cv2.drawContours(frame, contour, -1, (0, 255, 0), 3)
                    #draw circle on center of contour
                    cv2.circle(frame, (cX, cY), 7, (255, 255, 255), -1)
                    perimeter = cv2.arcLength(contour,True)
                    # NOTE(review): this overwrites the `approx` computed above
                    # with a much coarser approximation (epsilon 0.04 vs 0.001);
                    # neither result is used in the visible code.
                    approx = cv2.approxPolyDP(contour, 0.04 * perimeter, True)
                    #fit ellipse to get the orientation angle
                    # NOTE(review): fitEllipse reports the angle in [0, 180),
                    # so orientation is inherently ambiguous by 180 degrees for
                    # near-symmetric shapes — this is the likely root cause of
                    # the vector flipping left/right between frames.
                    _ ,_ ,angle = cv2.fitEllipse(contour)
                    P1x = cX
                    P1y = cY
                    length = 35  # drawn orientation-vector length in pixels
                    #compute the second endpoint of the orientation vector
                    P2x = int(P1x + length * math.cos(math.radians(angle)))
                    P2y = int(P1y + length * math.sin(math.radians(angle)))
                    #draw vector line
                    cv2.line(frame,(cX, cY),(P2x,P2y),(255,255,255),5)
                    #output center of contour
                    # NOTE(review): prints P1x with P2y — probably meant
                    # (P1x, P1y) for the center; verify intent.
                    print (P1x , P2y, angle)
                    #detect minimum-area (rotated) bounding box
                    rect = cv2.minAreaRect(contour)
                    box = cv2.boxPoints(rect)
                    box = np.int0(box)
                    #draw bounding box
                    cv2.drawContours(frame, [box],0,(0,0,255),2)
                    #Detect Hull ...
Could you provide the input image?
The input image has been added; somehow it didn't work as an attachment.
take a look at https://answers.opencv.org/question/7... and https://docs.opencv.org/master/dd/ddc...