MECARUCO: mechanics & aruco

MecAruco is a student project at Polytech Annecy Chambéry that aims to use image processing, and in particular ArUco markers, to support the teaching of mechanical engineering.

Notebooks

Aruco

Note

This notebook can be downloaded here: Aruco_detection-Tvec.ipynb

import numpy as np
import cv2
import cv2.aruco as aruco

# Load the predefined dictionary of 250 6x6-bit markers
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
#print(aruco_dict)
# second parameter is the marker id, last parameter is the total image size in pixels
img = aruco.drawMarker(aruco_dict, 2, 700)
cv2.imwrite("test_marker.jpg", img)

#cv2.imshow('frame', img)
#cv2.waitKey(0)  # only needed if the marker is displayed with imshow above
#cv2.destroyAllWindows()
posorigine = []  # reference corner positions, stored when the 'a' key is pressed
cap = cv2.VideoCapture(0)

# Camera matrix and distortion coefficients from a previous calibration of this webcam
mtx = np.array([[ 736.72620104,    0.        ,  335.09873285],
       [   0.        ,  784.55469771,  288.37183538],
       [   0.        ,    0.        ,    1.        ]])

dist=np.array([[  1.80626027e-01],
       [ -6.41707400e-01],
       [  5.59047400e-03],
       [  1.71301917e-03],
       [ -2.57102334e+00],
       [  6.96846440e-02],
       [ -4.08903572e-01],
       [ -2.89017255e+00],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00]] )

#cv2.namedWindow("truc", cv2.WND_PROP_FULLSCREEN)
#cv2.resizeWindow('truc', 1200,1200)
#cv2.SetWindowProperty("truc",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)
cv2.namedWindow("truc", cv2.WND_PROP_FULLSCREEN)
cv2.setWindowProperty("truc",cv2.WND_PROP_FULLSCREEN,cv2.WINDOW_FULLSCREEN)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()
    #print(frame.shape) #480x640
    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()

    #print(parameters)

    # detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]])
    #   -> corners, ids, rejectedImgPoints
    # lists of ids and the corners belonging to each id
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
    #print(corners[0][0][0])
    size_of_marker = 0.0202  # side length of the marker in meters
    if len(corners)>0:
        rvecs,tvecs, trash = aruco.estimatePoseSingleMarkers(corners, size_of_marker, mtx, dist)
        length_of_axis = 0.01
        imaxis = aruco.drawDetectedMarkers(frame, corners, ids)

        for i in range(len(tvecs)):
            imaxis = aruco.drawAxis(imaxis, mtx, dist, rvecs[i], tvecs[i], length_of_axis)

        # It works. One earlier problem was that the cellphone displaying the marker
        # put black all around it: the algorithm depends very much upon finding
        # rectangular black blobs.

        frame = aruco.drawDetectedMarkers(frame, corners)
        if len(posorigine) > 0:
            # Compare the current marker positions to the reference stored with 'a'
            for i in ids.flatten():
                if i in trackedIds.flatten():
                    orgcorners = posorigine[list(trackedIds.flatten()).index(i)]
                    newcorners = corners[list(ids.flatten()).index(i)]
                    # line from the reference corner to the current corner
                    cv2.line(frame,
                             (int(orgcorners[0][0][0]), int(orgcorners[0][0][1])),
                             (int(newcorners[0][0][0]), int(newcorners[0][0][1])),
                             (255, 255, 255), 5)
                    # in-plane displacement from the translation vectors, in mm
                    orgtvec = posoriginetvecs[list(trackedIds.flatten()).index(i)]
                    newtvec = tvecs[list(ids.flatten()).index(i)]
                    cal = np.sqrt((newtvec[0][0] - orgtvec[0][0])**2
                                  + (newtvec[0][1] - orgtvec[0][1])**2) * 1000
                    cal = round(float(cal), 2)
                    cv2.putText(frame, 'x' + str(i) + ':' + str(cal),
                                (0, 100 + 25 * int(i)), cv2.FONT_HERSHEY_SIMPLEX,
                                1, (255, 255, 255), 2)

        #print(rejectedImgPoints)
        # Display the resulting frame (imaxis already contains the drawn markers and axes)
        cv2.imshow('truc', imaxis)
    else:
        cv2.imshow('truc', frame)
    # Read the keyboard once per loop: 'a' stores the reference position, 'q' quits
    key = cv2.waitKey(1) & 0xFF
    if key == ord('a') and len(corners) > 0:
        posorigine = corners
        posoriginetvecs = tvecs
        trackedIds = ids
    if key == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
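As a side note on the measurement above: the white line and the printed value use only the x and y components of the translation vectors; the full 3D displacement could be computed the same way inside the loop (a sketch reusing the loop variables orgtvec and newtvec):

# full 3D displacement between the reference and the current pose, in mm
d3 = np.linalg.norm(newtvec[0] - orgtvec[0]) * 1000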
import multiprocessing

def calc_square(numbers):
    for n in numbers:
        print('square ' + str(n * n))

def calc_cube(numbers):
    for n in numbers:
        print('cube ' + str(n * n * n))

if __name__ == "__main__":
    arr = [2, 3, 8]
    p1 = multiprocessing.Process(target=calc_square, args=(arr,))
    p2 = multiprocessing.Process(target=calc_cube, args=(arr,))

    p1.start()
    p2.start()

    p1.join()
    p2.join()

    print("Done!")
# Quick check of the list-indexing pattern used for the marker tracking above
ids = [4, 5, 2]
ids.index(5)  # -> 1

Note

This notebook can be downloaded here: Aruco_detection_direct.ipynb

import numpy as np
import matplotlib.pyplot as plt
import cv2
import cv2.aruco as aruco
import time
%matplotlib tk
# other backends: inline, nbagg
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
img = aruco.drawMarker(aruco_dict, 2, 700)
cv2.imwrite("test_marker.jpg", img)

angle = []  # one rvecs entry per captured frame
cv2.waitKey(0)
cv2.destroyAllWindows()

cap = cv2.VideoCapture(0)

size_of_marker =  0.045

while(True):

    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()



    # detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]])
    #   -> corners, ids, rejectedImgPoints
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

    frame = aruco.drawDetectedMarkers(frame, corners)

    cv2.imshow('frame',frame)
    imsize = gray.shape[::-1]  # (width, height): gray.shape is (rows, cols)

    dist = np.zeros((5,1))

    mtx = np.array([[ 2000.,    0., imsize[0]/2.],
                    [    0., 2000., imsize[1]/2.],
                    [    0.,    0.,           1.]])

    rvecs,tvecs, trash = aruco.estimatePoseSingleMarkers(corners, size_of_marker, mtx, dist )

    angle.append(rvecs)


    if cv2.waitKey(1) & 0xFF == ord('q'):
        break


# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
# Plot the recorded angles against time; frames without a detected marker are skipped
# (assuming roughly 0.02 s between frames, as in the post-processing below)
t = [0.02 * i for i, a in enumerate(angle) if a is not None]
rz = [np.degrees(a[0][0][2]) for a in angle if a is not None]
plt.figure()
plt.xlabel('time (s)')
plt.ylabel('Rz (deg)')
plt.plot(t, rz)
plt.show()
plt.figure()
plt.plot(np.random.rand(10), "or-")
plt.show()

Note

This notebook can be downloaded here: Aruco_detection_direct_courbe_angle.ipynb

import numpy as np
import matplotlib.pyplot as plt
import cv2
import cv2.aruco as aruco
import time
import pandas as pd
%matplotlib tk
# inline, nbagg
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
img = aruco.drawMarker(aruco_dict, 2, 700)
cv2.imwrite("test_marker.jpg", img)

angle = []  # one rvecs entry per captured frame
cv2.waitKey(0)
cv2.destroyAllWindows()

cap = cv2.VideoCapture(0)

size_of_marker =  0.045

while(True):

    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
    parameters =  aruco.DetectorParameters_create()



    # detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]])
    #   -> corners, ids, rejectedImgPoints
    corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)

    frame = aruco.drawDetectedMarkers(frame, corners)

    cv2.imshow('frame',frame)
    imsize = gray.shape[::-1]  # (width, height): gray.shape is (rows, cols)

    dist = np.zeros((5,1))

    mtx = np.array([[ 2000.,    0., imsize[0]/2.],
                    [    0., 2000., imsize[1]/2.],
                    [    0.,    0.,           1.]])

    rvecs,tvecs, trash = aruco.estimatePoseSingleMarkers(corners, size_of_marker, mtx, dist )

    angle.append(rvecs)

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break


# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
dt = 0.02  # time between two frames (s)
Nframe = len(angle)
t = np.arange(Nframe) * dt
R = np.zeros((Nframe, 3))
for i in range(Nframe):
    if angle[i] is None:
        R[i,:] = np.nan  # no marker detected on this frame
    else:
        R[i,:] = angle[i]  # assumes a single marker per frame: shape (1, 1, 3)
out =  pd.DataFrame()
out["t"] = t
out["Rx"] = np.degrees(R[:, 0])
out["Ry"] = np.degrees(R[:, 1])
out["Rz"] = np.degrees(R[:, 2])
out.index.name = "frame"
out
t Rx Ry Rz
frame
0 0.00 NaN NaN NaN
1 0.02 NaN NaN NaN
2 0.04 NaN NaN NaN
3 0.06 NaN NaN NaN
4 0.08 NaN NaN NaN
5 0.10 NaN NaN NaN
6 0.12 NaN NaN NaN
7 0.14 NaN NaN NaN
8 0.16 NaN NaN NaN
9 0.18 NaN NaN NaN
10 0.20 NaN NaN NaN
11 0.22 NaN NaN NaN
12 0.24 NaN NaN NaN
13 0.26 NaN NaN NaN
14 0.28 NaN NaN NaN
15 0.30 NaN NaN NaN
16 0.32 NaN NaN NaN
17 0.34 NaN NaN NaN
18 0.36 NaN NaN NaN
19 0.38 NaN NaN NaN
20 0.40 NaN NaN NaN
21 0.42 NaN NaN NaN
22 0.44 NaN NaN NaN
23 0.46 NaN NaN NaN
24 0.48 NaN NaN NaN
25 0.50 NaN NaN NaN
26 0.52 NaN NaN NaN
27 0.54 NaN NaN NaN
28 0.56 NaN NaN NaN
29 0.58 NaN NaN NaN
... ... ... ... ...
548 10.96 112.616365 123.675515 -32.376848
549 10.98 110.689358 125.841417 -32.226566
550 11.00 109.985899 126.922315 -29.990246
551 11.02 112.962763 129.407285 -28.748873
552 11.04 111.537820 127.950517 -33.088528
553 11.06 108.779533 127.127758 -32.424396
554 11.08 108.748064 127.543609 -30.340518
555 11.10 106.893078 126.890596 -30.733698
556 11.12 107.203824 128.077016 -31.727598
557 11.14 106.673280 128.877379 -34.661211
558 11.16 106.045870 129.255417 -34.820648
559 11.18 106.156150 130.859854 -32.891079
560 11.20 107.553999 131.699757 -31.837569
561 11.22 107.274529 130.818850 -32.168542
562 11.24 106.388770 129.698420 -34.417733
563 11.26 105.204035 128.819671 -35.438550
564 11.28 106.147819 129.701578 -34.835199
565 11.30 103.868823 127.923984 -34.372065
566 11.32 104.221034 128.070224 -33.716344
567 11.34 104.919628 128.773843 -34.580152
568 11.36 105.228644 128.442850 -34.669834
569 11.38 104.998862 127.552461 -35.476936
570 11.40 104.633700 127.194306 -35.719187
571 11.42 102.687253 126.837849 -34.200362
572 11.44 102.172307 126.291363 -34.012023
573 11.46 101.908830 127.839283 -34.460761
574 11.48 103.063066 129.827542 -33.316156
575 11.50 105.050463 132.241574 -30.550074
576 11.52 103.379668 128.878511 -30.982259
577 11.54 103.308638 128.442769 -32.294583

578 rows × 4 columns
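The dt = 0.02 s above is an assumed frame spacing (about 50 fps). If the capture backend reports the real frame rate, dt could be derived from it instead; a minimal sketch, to be run before cap.release():

fps = cap.get(cv2.CAP_PROP_FPS)  # may return 0 if the backend does not know the rate
dt = 1.0 / fps if fps > 0 else 0.02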

plt.figure()
plt.plot(out.t, abs(out.Rz))
plt.show()
Rz = [a for a in out.Rz if not np.isnan(a)]
Rz
plt.figure()
plt.plot(out.t, out.Rz, "o-")
plt.show()
help(aruco.detectMarkers)
Help on built-in function detectMarkers:

detectMarkers(...)
    detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints[, cameraMatrix[, distCoeff]]]]]]) -> corners, ids, rejectedImgPoints
    .   * @brief Basic marker detection
    .   *
    .   * @param image input image
    .   * @param dictionary indicates the type of markers that will be searched
    .   * @param corners vector of detected marker corners. For each marker, its four corners
    .   * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
    .   * the dimensions of this array is Nx4. The order of the corners is clockwise.
    .   * @param ids vector of identifiers of the detected markers. The identifier is of type int
    .   * (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
    .   * The identifiers have the same order than the markers in the imgPoints array.
    .   * @param parameters marker detection parameters
    .   * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
    .   * correct codification. Useful for debugging purposes.
    .   * @param cameraMatrix optional input 3x3 floating-point camera matrix
    .   * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
    .   * @param distCoeff optional vector of distortion coefficients
    .   * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
    .   *
    .   * Performs marker detection in the input image. Only markers included in the specific dictionary
    .   * are searched. For each detected marker, it returns the 2D position of its corner in the image
    .   * and its corresponding identifier.
    .   * Note that this function does not perform pose estimation.
    .   * @sa estimatePoseSingleMarkers,  estimatePoseBoard
    .   *

Note

This notebook can be downloaded here: Projet+calibration-Paul.ipynb

import numpy as np
import cv2, PIL, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
%matplotlib nbagg
imagesFolder = "E:\Desktop\S8\Projet 851\data"

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

fig = plt.figure()
nx = 8
ny = 6
for i in range(1, nx*ny+1):
    ax = fig.add_subplot(ny,nx, i)
    img = aruco.drawMarker(aruco_dict,i-1, 700)
    plt.imshow(img, cmap = mpl.cm.gray, interpolation = "nearest")
    ax.axis("off")

plt.savefig(imagesFolder + "/markers.pdf")
plt.show()
#plt.close()
board = aruco.CharucoBoard_create(3, 3, 1, 0.8, aruco_dict)
imboard = board.draw((4000, 4000))
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.imshow(imboard, cmap = mpl.cm.gray, interpolation = "nearest")
ax.axis("off")
cv2.imwrite(imagesFolder + "/chessboard.tiff",imboard)
#plt.savefig(imagesFolder + "/chessboard.pdf")
plt.grid()
plt.show()
print("Imprimer le damier de calibration!")
<IPython.core.display.Javascript object>
Imprimer le damier de calibration!
import cv2

videoFile = "E:/Desktop/S8/Projet 851/outpy.avi"
imagesFolder = "E:/Desktop/S8/Projet 851/data/"
cap = cv2.VideoCapture(videoFile)
frameRate = cap.get(cv2.CAP_PROP_FPS)  # frame rate
while(cap.isOpened()):
    frameId = cap.get(cv2.CAP_PROP_POS_FRAMES)  # current frame number
    ret, frame = cap.read()
    if ret != True:
        break
    if frameId < 150:
        filename = imagesFolder + "image_" + str(int(frameId)) + ".jpg"
        cv2.imwrite(filename, frame)
cap.release()
print("Done!")
Done!
im = PIL.Image.open(imagesFolder + "image_0.jpg")
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.imshow(im)
ax.axis('off')
plt.show()
def read_chessboards(images):
    """
    Charuco base pose estimation.
    """
    print("POSE ESTIMATION STARTS:")
    allCorners = []
    allIds = []
    decimator = 0

    for im in images:
        print("=> Processing image {0}".format(im))
        frame = cv2.imread(im)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        res = cv2.aruco.detectMarkers(gray, aruco_dict)

        if len(res[0])>0:
            res2 = cv2.aruco.interpolateCornersCharuco(res[0],res[1],gray,board)
            if res2[1] is not None and res2[2] is not None and len(res2[1])>3 and decimator%1==0:
                allCorners.append(res2[1])
                allIds.append(res2[2])

        decimator+=1

    imsize = gray.shape[::-1]  # (width, height), as expected by the calibration below
    return allCorners, allIds, imsize
#%%time
images = [imagesFolder + f for f in os.listdir(imagesFolder) if f.startswith("image_")]
allCorners,allIds,imsize=read_chessboards(images)
POSE ESTIMATION STARTS:
=> Processing image E:/Desktop/S8/Projet 851/data/image_69.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_139.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_15.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_145.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_12.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_142.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_60.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_130.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_67.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_137.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_52.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_2.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_83.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_102.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_29.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_5.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_55.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_84.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_105.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_27.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_20.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_31.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_36.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_38.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_114.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_95.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_44.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_113.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_92.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_43.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_126.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_76.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_121.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_71.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_128.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_78.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_21.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_26.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_28.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_54.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_4.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_85.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_104.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_3.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_53.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_82.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_103.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_66.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_136.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_61.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_131.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_13.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_143.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_68.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_138.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_14.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_144.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_129.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_79.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_120.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_70.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_127.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_77.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_112.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_93.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_42.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_39.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_115.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_94.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_45.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_37.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_30.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_124.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_74.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_123.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_73.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_116.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_97.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_46.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_111.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_90.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_41.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_33.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_34.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_118.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_99.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_48.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_25.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_59.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_9.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_88.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_109.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_22.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_0.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_50.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_81.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_100.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_57.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_7.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_86.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_107.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_62.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_132.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_65.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_135.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_19.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_149.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_17.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_147.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_10.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_140.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_35.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_119.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_98.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_49.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_32.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_110.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_91.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_40.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_117.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_96.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_47.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_122.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_72.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_125.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_75.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_11.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_141.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_16.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_146.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_64.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_134.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_18.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_148.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_63.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_133.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_6.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_56.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_87.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_106.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_51.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_1.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_80.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_101.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_23.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_24.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_8.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_58.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_89.jpg
=> Processing image E:/Desktop/S8/Projet 851/data/image_108.jpg
def calibrate_camera(allCorners,allIds,imsize):
    """
    Calibrates the camera using the detected corners.
    """
    print("CAMERA CALIBRATION")

    cameraMatrixInit = np.array([[ 2000.,    0., imsize[0]/2.],
                                 [    0., 2000., imsize[1]/2.],
                                 [    0.,    0.,           1.]])

    distCoeffsInit = np.zeros((5,1))
    flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL)
    (ret, camera_matrix, distortion_coefficients0,
     rotation_vectors, translation_vectors,
     stdDeviationsIntrinsics, stdDeviationsExtrinsics,
     perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
                      charucoCorners=allCorners,
                      charucoIds=allIds,
                      board=board,
                      imageSize=imsize,
                      cameraMatrix=cameraMatrixInit,
                      distCoeffs=distCoeffsInit,
                      flags=flags,
                      criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))

    return ret, camera_matrix, distortion_coefficients0, rotation_vectors, translation_vectors
    print("finished")
%%time
ret, mtx, dist, rvecs, tvecs = calibrate_camera(allCorners,allIds,imsize)
ret
CAMERA CALIBRATION
Wall time: 12min 46s
mtx
array([[  1.46963466e+03,   0.00000000e+00,   2.63094117e+02],
       [  0.00000000e+00,   1.47297770e+03,   3.19127464e+02],
       [  0.00000000e+00,   0.00000000e+00,   1.00000000e+00]])
dist
array([[ -4.15557150e+00],
       [  8.04563425e+02],
       [  1.72644822e-01],
       [ -4.62914356e-02],
       [ -1.41439828e+04],
       [  4.99936408e+00],
       [ -2.89968864e+02],
       [  1.96691829e+04],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00],
       [  0.00000000e+00]])
np.savetxt(imagesFolder+"calib_mtx_webcam.csv", mtx)
np.savetxt(imagesFolder+"calib_dist_webcam.csv", dist)
Check calibration
i = 24  # select an image id
plt.figure()
frame = cv2.imread(imagesFolder + "image_{}.jpg".format(i))
img_undist = cv2.undistort(frame,mtx,dist,None)
plt.subplot(211)
plt.imshow(frame)
plt.title("Raw image")
plt.axis("off")
plt.subplot(212)
plt.imshow(img_undist)
plt.title("Corrected image")
plt.axis("off")
plt.show()
Use of the camera calibration to estimate the 3D translation and rotation of each marker in a scene
frame = cv2.imread(imagesFolder + "image_10.jpg")
plt.figure()
plt.imshow(frame)
plt.show()
Post processing
%%time

gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters =  aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
Wall time: 38.1 ms
Result
plt.figure()
plt.imshow(frame_markers)
plt.show()
Add local axes on each marker
size_of_marker = 0.0145  # side length of the marker in meters
rvecs, tvecs, trash = aruco.estimatePoseSingleMarkers(corners, size_of_marker, mtx, dist)
tvecs
array([[[ 0.03625717,  0.04684617,  0.46653871]],

       [[ 0.07796683,  0.00174052,  0.44514342]],

       [[ 0.0131153 , -0.02022943,  0.44694283]],

       [[-0.11509333, -0.02061886,  0.4465331 ]],

       [[ 0.07544442, -0.0398416 ,  0.43262915]],

       [[ 0.09802467, -0.06131984,  0.44035666]],

       [[ 0.05448294, -0.06054667,  0.4326234 ]],

       [[ 0.05369867,  0.0631415 ,  0.42116605]],

       [[-0.11193155,  0.06244439,  0.427018  ]],

       [[ 0.05551877,  0.02247846,  0.43705436]],

       [[ 0.05492523, -0.01945728,  0.43442988]],

       [[-0.00789508, -0.03981689,  0.42867534]],

       [[-0.07027212, -0.06136102,  0.43652852]],

       [[ 0.07773333,  0.08803092,  0.43954609]],

       [[ 0.03309225,  0.08473541,  0.42561175]],

       [[-0.0083905 ,  0.08445285,  0.42618681]],

       [[-0.05225633,  0.08741113,  0.4432211 ]],

       [[-0.09553183,  0.08714634,  0.44444085]],

       [[ 0.09551237,  0.06367101,  0.42236155]],

       [[ 0.01270016,  0.0654016 ,  0.43867211]],

       [[-0.02963197,  0.06396202,  0.4309523 ]],

       [[-0.07063805,  0.06274348,  0.42591115]],

       [[ 0.07741211,  0.04443328,  0.43943835]],

       [[-0.00828486,  0.04297415,  0.42962259]],

       [[-0.05158105,  0.04377289,  0.44160457]],

       [[-0.09398191,  0.04328224,  0.44114026]],

       [[ 0.09948571,  0.02302646,  0.44172269]],

       [[ 0.01301304,  0.02262962,  0.44414953]],

       [[-0.02986066,  0.02198689,  0.4362466 ]],

       [[-0.07277628,  0.02201956,  0.44309408]],

       [[-0.11713915,  0.0219428 ,  0.45057945]],

       [[ 0.03385166,  0.00165063,  0.43509877]],

       [[-0.00814628,  0.00132911,  0.42935713]],

       [[-0.05144476,  0.00123526,  0.44487906]],

       [[-0.09568427,  0.00092015,  0.45336912]],

       [[ 0.10089582, -0.01985325,  0.45093768]],

       [[-0.02939441, -0.0199319 ,  0.4364838 ]],

       [[-0.07200425, -0.0203373 ,  0.44242592]],

       [[-0.05016834, -0.04077952,  0.43744884]],

       [[-0.0918302 , -0.04095322,  0.43837771]],

       [[ 0.01312589, -0.06296745,  0.44912868]],

       [[-0.0291022 , -0.06147094,  0.4378132 ]],

       [[-0.11297562, -0.0621073 ,  0.44228741]]])
tvecs.shape
(43, 1, 3)
np.degrees(rvecs)
array([[[  1.62516394e+02,   2.46355609e+00,  -1.09402896e+01]],

       [[  1.79457420e+02,   7.19277484e-02,  -1.32140582e+00]],

       [[  1.77773147e+02,  -9.91113450e-01,   5.53146653e+00]],

       [[ -1.85403347e+02,  -2.23002873e-01,   1.24063729e+01]],

       [[  1.81444729e+02,  -7.25485937e-01,   1.04275177e+01]],

       [[  1.77398449e+02,  -2.71016859e-01,   7.89579400e+00]],

       [[  1.73242093e+02,   1.79338050e+00,  -5.06408000e+00]],

       [[  1.66617310e+02,  -7.61331890e-01,  -1.96025399e+01]],

       [[ -1.79129608e+02,  -4.21227613e+00,  -6.45245428e+01]],

       [[  1.81282644e+02,  -1.57056435e-01,  -6.86839568e-01]],

       [[  1.65840660e+02,   9.53216792e-01,  -4.33656312e+00]],

       [[  1.76040502e+02,   1.50523348e+00,  -1.30437940e+01]],

       [[  1.72773893e+02,   9.89154542e-01,  -6.18063691e+00]],

       [[ -1.83491628e+02,  -2.23552817e+00,  -8.18637179e-01]],

       [[ -1.76798040e+02,  -1.43834284e+00,   2.15416822e-01]],

       [[ -1.77092833e+02,   2.28512214e-02,   6.79815955e-01]],

       [[ -1.91819997e+02,  -4.62989175e+00,  -2.72407766e+01]],

       [[ -1.88598008e+02,  -4.31571915e+00,  -4.45937173e+01]],

       [[  1.72422229e+02,   2.36525196e+00,   1.21332142e+01]],

       [[  1.70530658e+02,   9.57492283e-01,  -7.11983884e+00]],

       [[ -1.87682503e+02,  -2.04001941e+00,  -3.27632552e+00]],

       [[  1.72237634e+02,   2.40773992e-01,  -7.26850888e+00]],

       [[  1.76200826e+02,   2.12251694e+00,   8.50244737e+00]],

       [[ -1.70882183e+02,  -1.02785140e+00,  -1.36475536e+00]],

       [[  1.66528971e+02,   7.38399741e-01,   2.02590218e+01]],

       [[  1.74711765e+02,   6.94637499e-01,  -1.47345906e+01]],

       [[ -1.82368806e+02,  -8.91134423e-01,  -3.14684799e+00]],

       [[ -1.79633708e+02,  -8.69525993e-01,   1.46194101e+01]],

       [[ -1.71473555e+02,  -2.21806078e+00,  -5.66001095e+00]],

       [[  1.83622188e+02,   2.59067337e+00,   1.22927092e+01]],

       [[  1.74751206e+02,   7.16637262e-01,  -3.74674508e+00]],

       [[  1.93004493e+02,  -5.16294249e-01,  -1.14421246e+01]],

       [[  1.63289047e+02,   5.75137737e-01,   3.98558399e-01]],

       [[  1.76744726e+02,   2.63313500e-01,  -1.71716506e+01]],

       [[  1.78244412e+02,   1.48455510e+00,  -4.94199360e+00]],

       [[ -1.67227497e+02,   1.13991745e+00,   3.64310604e+01]],

       [[  1.96110861e+02,   1.20281778e+00,   4.91750442e+00]],

       [[ -1.90395446e+02,  -2.17143285e-01,  -8.94562363e+00]],

       [[  1.80592357e+02,   2.08727304e+00,  -2.16501956e+01]],

       [[  1.77081940e+02,   3.29046736e+00,  -1.88123338e+01]],

       [[  1.86433786e+02,   2.42004714e+00,  -2.39405027e+01]],

       [[ -1.60978143e+02,  -1.03801207e-01,  -1.69359969e+01]],

       [[  1.74702391e+02,   3.89745168e+00,  -1.58659988e+01]]])
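The rvecs returned by estimatePoseSingleMarkers are Rodrigues rotation vectors (axis times angle), so taking np.degrees of their raw components is only a rough reading. A minimal sketch of a stricter interpretation for one marker, using cv2.Rodrigues (not part of the original notebook):

R, _ = cv2.Rodrigues(rvecs[0])                # 3x3 rotation matrix of the first marker
theta = np.degrees(np.linalg.norm(rvecs[0]))  # total rotation angle, in degrees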
length_of_axis = 0.01
imaxis = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
for i in range(len(tvecs)):
    imaxis = aruco.drawAxis(imaxis, mtx, dist, rvecs[i], tvecs[i], length_of_axis)
plt.figure()
plt.imshow(imaxis)
plt.show()
data = pd.DataFrame(data=tvecs.reshape(-1, 3), columns=["tx", "ty", "tz"], index=ids.flatten())
data.index.name = "markers"
data.sort_index(inplace=True)
data
tx ty tz
markers
0 -0.112976 -0.062107 0.442287
1 -0.070272 -0.061361 0.436529
2 -0.029102 -0.061471 0.437813
3 0.013126 -0.062967 0.449129
4 0.054483 -0.060547 0.432623
5 0.098025 -0.061320 0.440357
6 -0.091830 -0.040953 0.438378
7 -0.050168 -0.040780 0.437449
8 -0.007895 -0.039817 0.428675
10 0.075444 -0.039842 0.432629
11 -0.115093 -0.020619 0.446533
12 -0.072004 -0.020337 0.442426
13 -0.029394 -0.019932 0.436484
14 0.013115 -0.020229 0.446943
15 0.054925 -0.019457 0.434430
16 0.100896 -0.019853 0.450938
17 -0.095684 0.000920 0.453369
18 -0.051445 0.001235 0.444879
19 -0.008146 0.001329 0.429357
20 0.033852 0.001651 0.435099
21 0.077967 0.001741 0.445143
22 -0.117139 0.021943 0.450579
23 -0.072776 0.022020 0.443094
24 -0.029861 0.021987 0.436247
25 0.013013 0.022630 0.444150
26 0.055519 0.022478 0.437054
27 0.099486 0.023026 0.441723
28 -0.093982 0.043282 0.441140
29 -0.051581 0.043773 0.441605
30 -0.008285 0.042974 0.429623
31 0.036257 0.046846 0.466539
32 0.077412 0.044433 0.439438
33 -0.111932 0.062444 0.427018
34 -0.070638 0.062743 0.425911
35 -0.029632 0.063962 0.430952
36 0.012700 0.065402 0.438672
37 0.053699 0.063141 0.421166
38 0.095512 0.063671 0.422362
39 -0.095532 0.087146 0.444441
40 -0.052256 0.087411 0.443221
41 -0.008390 0.084453 0.426187
42 0.033092 0.084735 0.425612
43 0.077733 0.088031 0.439546
p = data.values
# distances (in meters) between three pairs of horizontally adjacent markers
((p[1]-p[0])**2.).sum()**.5, ((p[2]-p[1])**2.).sum()**.5, ((p[3]-p[2])**2.).sum()**.5
(0.04309652659780655, 0.041190101023714003, 0.043743470242086288)
((data.loc[11] - data.loc[0]).values**2).sum()  # squared distance between markers 0 and 11
0.001743801337263979
V0_1 = p[1] - p[0]    # vector from marker 0 to marker 1
V0_11 = p[11] - p[0]  # careful: row 11 of p is marker id 12, since id 9 is missing
V0_1, V0_11
(array([ 0.0427035 ,  0.00074629, -0.00575889]),
 array([ 0.04097137,  0.04177   ,  0.00013852]))
np.dot(V0_1,V0_11)
0.0017799953347752665
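Because row 11 of p is marker id 12 (id 9 was never detected), V0_11 points along a grid diagonal rather than the second grid axis; the angle between the two vectors confirms this:

cos_theta = np.dot(V0_1, V0_11) / (np.linalg.norm(V0_1) * np.linalg.norm(V0_11))
np.degrees(np.arccos(cos_theta))  # about 45 degrees: the grid diagonal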
fig=plt.figure()
ax= fig.add_subplot(1,1,1)
ax.set_aspect("equal")
plt.plot(data.tx[:10], data.ty[:10],"or-")
plt.grid()
plt.show()
data.tx
corners = np.array(corners)
# note: px is the pixel row (vertical coordinate), py the pixel column (horizontal)
data2 = pd.DataFrame({"px": corners[:, 0, 0, 1], "py": corners[:, 0, 0, 0]}, index=ids.flatten())
data2.sort_index(inplace=True)

data2
px py
0 229.0 335.0
1 230.0 465.0
2 230.0 595.0
3 231.0 729.0
4 231.0 861.0
5 232.0 995.0
6 316.0 399.0
7 317.0 530.0
8 318.0 662.0
10 319.0 928.0
11 405.0 331.0
12 405.0 462.0
13 406.0 594.0
14 407.0 728.0
15 408.0 861.0
16 409.0 996.0
17 494.0 396.0
18 495.0 529.0
19 496.0 661.0
20 497.0 794.0
21 497.0 929.0
22 580.0 328.0
23 581.0 460.0
24 582.0 593.0
25 583.0 728.0
26 584.0 862.0
27 585.0 998.0
28 670.0 394.0
29 672.0 526.0
30 673.0 660.0
31 676.0 796.0
32 675.0 931.0
33 758.0 324.0
34 760.0 458.0
35 761.0 592.0
36 762.0 728.0
37 764.0 862.0
38 765.0 999.0
39 851.0 390.0
40 852.0 525.0
41 854.0 659.0
42 855.0 794.0
43 857.0 931.0
n0 = data2.loc[0]
n1 = data2.loc[1]
d01 = ((n0 - n1).values**2).sum()**.5  # pixel distance between markers 0 and 1
d = 42.5e-3  # physical distance between markers 0 and 1, in meters
factor = d / d01  # meters per pixel
data2["x"] = data2.px * factor
data2["y"] = data2.py * factor
d1_0 = data2.loc[2].y - data2.loc[1].y    # spacing between markers 1 and 2, in meters
d11_0 = data2.loc[11].x - data2.loc[0].x  # spacing between markers 0 and 11, in meters
d1_0
0.042498738
d11_0
0.057536766
imagesFolder = "E:\Desktop\S8\Projet 851\data"
dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_6X6_250)
board = cv2.aruco.CharucoBoard_create(3,3,.025,.0125,dictionary)
img = board.draw((200*3,200*3))
cv2.imwrite(imagesFolder + '\charucotest.png',img)
True
help(aruco)
Help on module cv2.aruco in cv2:

NAME
    cv2.aruco

FUNCTIONS
    Board_create(...)
        Board_create(objPoints, dictionary, ids) -> retval
        .   * @brief Provide way to create Board by passing nessesary data. Specially needed in Python.
        .   *
        .   * @param objPoints array of object points of all the marker corners in the board
        .   * @param dictionary the dictionary of markers employed for this board
        .   * @param ids vector of the identifiers of the markers in the board
        .   *

    CharucoBoard_create(...)
        CharucoBoard_create(squaresX, squaresY, squareLength, markerLength, dictionary) -> retval
        .   * @brief Create a CharucoBoard object
        .   *
        .   * @param squaresX number of chessboard squares in X direction
        .   * @param squaresY number of chessboard squares in Y direction
        .   * @param squareLength chessboard square side length (normally in meters)
        .   * @param markerLength marker side length (same unit than squareLength)
        .   * @param dictionary dictionary of markers indicating the type of markers.
        .   * The first markers in the dictionary are used to fill the white chessboard squares.
        .   * @return the output CharucoBoard object
        .   *
        .   * This functions creates a CharucoBoard object given the number of squares in each direction
        .   * and the size of the markers and chessboard squares.

    DetectorParameters_create(...)
        DetectorParameters_create() -> retval
        .

    Dictionary_create(...)
        Dictionary_create(nMarkers, markerSize) -> retval
        .   * @see generateCustomDictionary

    Dictionary_create_from(...)
        Dictionary_create_from(nMarkers, markerSize, baseDictionary) -> retval
        .   * @see generateCustomDictionary

    Dictionary_get(...)
        Dictionary_get(dict) -> retval
        .   * @see getPredefinedDictionary

    GridBoard_create(...)
        GridBoard_create(markersX, markersY, markerLength, markerSeparation, dictionary[, firstMarker]) -> retval
        .   * @brief Create a GridBoard object
        .   *
        .   * @param markersX number of markers in X direction
        .   * @param markersY number of markers in Y direction
        .   * @param markerLength marker side length (normally in meters)
        .   * @param markerSeparation separation between two markers (same unit as markerLength)
        .   * @param dictionary dictionary of markers indicating the type of markers
        .   * @param firstMarker id of first marker in dictionary to use on board.
        .   * @return the output GridBoard object
        .   *
        .   * This functions creates a GridBoard object given the number of markers in each direction and
        .   * the marker size and marker separation.

    calibrateCameraAruco(...)
        calibrateCameraAruco(corners, ids, counter, board, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
        .   @brief It's the same function as #calibrateCameraAruco but without calibration error estimation.

    calibrateCameraArucoExtended(...)
        calibrateCameraArucoExtended(corners, ids, counter, board, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, stdDeviationsIntrinsics[, stdDeviationsExtrinsics[, perViewErrors[, flags[, criteria]]]]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs, stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors
        .   * @brief Calibrate a camera using aruco markers
        .   *
        .   * @param corners vector of detected marker corners in all frames.
        .   * The corners should have the same format returned by detectMarkers (see #detectMarkers).
        .   * @param ids list of identifiers for each marker in corners
        .   * @param counter number of markers in each frame so that corners and ids can be split
        .   * @param board Marker Board layout
        .   * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
        .   * @param cameraMatrix Output 3x3 floating-point camera matrix
        .   * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV_CALIB_USE_INTRINSIC_GUESS
        .   * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
        .   * initialized before calling the function.
        .   * @param distCoeffs Output vector of distortion coefficients
        .   * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
        .   * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
        .   * (e.g. std::vector<cv::Mat>>). That is, each k-th rotation vector together with the corresponding
        .   * k-th translation vector (see the next output parameter description) brings the board pattern
        .   * from the model coordinate space (in which object points are specified) to the world coordinate
        .   * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).
        .   * @param tvecs Output vector of translation vectors estimated for each pattern view.
        .   * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
        .   * Order of deviations values:
        .   * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3,
        .   * s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.
        .   * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
        .   * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
        .   * \f$R_i, T_i\f$ are concatenated 1x3 vectors.
        .   * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
        .   * @param flags flags Different flags  for the calibration process (see #calibrateCamera for details).
        .   * @param criteria Termination criteria for the iterative optimization algorithm.
        .   *
        .   * This function calibrates a camera using an Aruco Board. The function receives a list of
        .   * detected markers from several views of the Board. The process is similar to the chessboard
        .   * calibration in calibrateCamera(). The function returns the final re-projection error.

    calibrateCameraCharuco(...)
        calibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
        .   @brief It's the same function as #calibrateCameraCharuco but without calibration error estimation.

    calibrateCameraCharucoExtended(...)
        calibrateCameraCharucoExtended(charucoCorners, charucoIds, board, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, stdDeviationsIntrinsics[, stdDeviationsExtrinsics[, perViewErrors[, flags[, criteria]]]]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs, stdDeviationsIntrinsics, stdDeviationsExtrinsics, perViewErrors
        .   * @brief Calibrate a camera using Charuco corners
        .   *
        .   * @param charucoCorners vector of detected charuco corners per frame
        .   * @param charucoIds list of identifiers for each corner in charucoCorners per frame
        .   * @param board Marker Board layout
        .   * @param imageSize input image size
        .   * @param cameraMatrix Output 3x3 floating-point camera matrix
        .   * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV_CALIB_USE_INTRINSIC_GUESS
        .   * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
        .   * initialized before calling the function.
        .   * @param distCoeffs Output vector of distortion coefficients
        .   * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
        .   * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
        .   * (e.g. std::vector<cv::Mat>>). That is, each k-th rotation vector together with the corresponding
        .   * k-th translation vector (see the next output parameter description) brings the board pattern
        .   * from the model coordinate space (in which object points are specified) to the world coordinate
        .   * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. M -1).
        .   * @param tvecs Output vector of translation vectors estimated for each pattern view.
        .   * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
        .   * Order of deviations values:
        .   * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6, s_1, s_2, s_3,
        .   * s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.
        .   * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
        .   * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
        .   * \f$R_i, T_i\f$ are concatenated 1x3 vectors.
        .   * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
        .   * @param flags flags Different flags  for the calibration process (see #calibrateCamera for details).
        .   * @param criteria Termination criteria for the iterative optimization algorithm.
        .   *
        .   * This function calibrates a camera using a set of corners of a  Charuco Board. The function
        .   * receives a list of detected corners and its identifiers from several views of the Board.
        .   * The function returns the final re-projection error.

    custom_dictionary(...)
        custom_dictionary(nMarkers, markerSize) -> retval
        .   * @see generateCustomDictionary

    custom_dictionary_from(...)
        custom_dictionary_from(nMarkers, markerSize, baseDictionary) -> retval
        .   * @brief Generates a new customizable marker dictionary
        .   *
        .   * @param nMarkers number of markers in the dictionary
        .   * @param markerSize number of bits per dimension of each markers
        .   * @param baseDictionary Include the markers in this dictionary at the beginning (optional)
        .   *
        .   * This function creates a new dictionary composed by nMarkers markers and each markers composed
        .   * by markerSize x markerSize bits. If baseDictionary is provided, its markers are directly
        .   * included and the rest are generated based on them. If the size of baseDictionary is higher
        .   * than nMarkers, only the first nMarkers in baseDictionary are taken and no new marker is added.

    detectCharucoDiamond(...)
        detectCharucoDiamond(image, markerCorners, markerIds, squareMarkerLengthRate[, diamondCorners[, diamondIds[, cameraMatrix[, distCoeffs]]]]) -> diamondCorners, diamondIds
        .   * @brief Detect ChArUco Diamond markers
        .   *
        .   * @param image input image necessary for corner subpixel.
        .   * @param markerCorners list of detected marker corners from detectMarkers function.
        .   * @param markerIds list of marker ids in markerCorners.
        .   * @param squareMarkerLengthRate rate between square and marker length:
        .   * squareMarkerLengthRate = squareLength/markerLength. The real units are not necessary.
        .   * @param diamondCorners output list of detected diamond corners (4 corners per diamond). The order
        .   * is the same than in marker corners: top left, top right, bottom right and bottom left. Similar
        .   * format than the corners returned by detectMarkers (e.g std::vector<std::vector<cv::Point2f> > ).
        .   * @param diamondIds ids of the diamonds in diamondCorners. The id of each diamond is in fact of
        .   * type Vec4i, so each diamond has 4 ids, which are the ids of the aruco markers composing the
        .   * diamond.
        .   * @param cameraMatrix Optional camera calibration matrix.
        .   * @param distCoeffs Optional camera distortion coefficients.
        .   *
        .   * This function detects Diamond markers from the previous detected ArUco markers. The diamonds
        .   * are returned in the diamondCorners and diamondIds parameters. If camera calibration parameters
        .   * are provided, the diamond search is based on reprojection. If not, diamond search is based on
        .   * homography. Homography is faster than reprojection but can slightly reduce the detection rate.

    detectMarkers(...)
        detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints[, cameraMatrix[, distCoeff]]]]]]) -> corners, ids, rejectedImgPoints
        .   * @brief Basic marker detection
        .   *
        .   * @param image input image
        .   * @param dictionary indicates the type of markers that will be searched
        .   * @param corners vector of detected marker corners. For each marker, its four corners
        .   * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
        .   * the dimensions of this array is Nx4. The order of the corners is clockwise.
        .   * @param ids vector of identifiers of the detected markers. The identifier is of type int
        .   * (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
        .   * The identifiers have the same order than the markers in the imgPoints array.
        .   * @param parameters marker detection parameters
        .   * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
        .   * correct codification. Useful for debugging purposes.
        .   * @param cameraMatrix optional input 3x3 floating-point camera matrix
        .   * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
        .   * @param distCoeff optional vector of distortion coefficients
        .   * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
        .   *
        .   * Performs marker detection in the input image. Only markers included in the specific dictionary
        .   * are searched. For each detected marker, it returns the 2D position of its corner in the image
        .   * and its corresponding identifier.
        .   * Note that this function does not perform pose estimation.
        .   * @sa estimatePoseSingleMarkers,  estimatePoseBoard
        .   *

    drawAxis(...)
        drawAxis(image, cameraMatrix, distCoeffs, rvec, tvec, length) -> image
        .   * @brief Draw coordinate system axis from pose estimation
        .   *
        .   * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
        .   * altered.
        .   * @param cameraMatrix input 3x3 floating-point camera matrix
        .   * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
        .   * @param distCoeffs vector of distortion coefficients
        .   * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
        .   * @param rvec rotation vector of the coordinate system that will be drawn. (@sa Rodrigues).
        .   * @param tvec translation vector of the coordinate system that will be drawn.
        .   * @param length length of the painted axis in the same unit than tvec (usually in meters)
        .   *
        .   * Given the pose estimation of a marker or board, this function draws the axis of the world
        .   * coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.

    drawDetectedCornersCharuco(...)
        drawDetectedCornersCharuco(image, charucoCorners[, charucoIds[, cornerColor]]) -> image
        .   * @brief Draws a set of Charuco corners
        .   * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
        .   * altered.
        .   * @param charucoCorners vector of detected charuco corners
        .   * @param charucoIds list of identifiers for each corner in charucoCorners
        .   * @param cornerColor color of the square surrounding each corner
        .   *
        .   * This function draws a set of detected Charuco corners. If identifiers vector is provided, it also
        .   * draws the id of each corner.

    drawDetectedDiamonds(...)
        drawDetectedDiamonds(image, diamondCorners[, diamondIds[, borderColor]]) -> image
        .   * @brief Draw a set of detected ChArUco Diamond markers
        .   *
        .   * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
        .   * altered.
        .   * @param diamondCorners positions of diamond corners in the same format returned by
        .   * detectCharucoDiamond(). (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
        .   * the dimensions of this array should be Nx4. The order of the corners should be clockwise.
        .   * @param diamondIds vector of identifiers for diamonds in diamondCorners, in the same format
        .   * returned by detectCharucoDiamond() (e.g. std::vector<Vec4i>).
        .   * Optional, if not provided, ids are not painted.
        .   * @param borderColor color of marker borders. Rest of colors (text color and first corner color)
        .   * are calculated based on this one.
        .   *
        .   * Given an array of detected diamonds, this functions draws them in the image. The marker borders
        .   * are painted and the markers identifiers if provided.
        .   * Useful for debugging purposes.

    drawDetectedMarkers(...)
        drawDetectedMarkers(image, corners[, ids[, borderColor]]) -> image
        .   * @brief Draw detected markers in image
        .   *
        .   * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
        .   * altered.
        .   * @param corners positions of marker corners on input image.
        .   * (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the dimensions of
        .   * this array should be Nx4. The order of the corners should be clockwise.
        .   * @param ids vector of identifiers for markers in markersCorners .
        .   * Optional, if not provided, ids are not painted.
        .   * @param borderColor color of marker borders. Rest of colors (text color and first corner color)
        .   * are calculated based on this one to improve visualization.
        .   *
        .   * Given an array of detected marker corners and its corresponding ids, this functions draws
        .   * the markers in the image. The marker borders are painted and the markers identifiers if provided.
        .   * Useful for debugging purposes.

    drawMarker(...)
        drawMarker(dictionary, id, sidePixels[, img[, borderBits]]) -> img
        .   * @brief Draw a canonical marker image
        .   *
        .   * @param dictionary dictionary of markers indicating the type of markers
        .   * @param id identifier of the marker that will be returned. It has to be a valid id
        .   * in the specified dictionary.
        .   * @param sidePixels size of the image in pixels
        .   * @param img output image with the marker
        .   * @param borderBits width of the marker border.
        .   *
        .   * This function returns a marker image in its canonical form (i.e. ready to be printed)

    drawPlanarBoard(...)
        drawPlanarBoard(board, outSize[, img[, marginSize[, borderBits]]]) -> img
        .   * @brief Draw a planar board
        .   * @sa _drawPlanarBoardImpl
        .   *
        .   * @param board layout of the board that will be drawn. The board should be planar,
        .   * z coordinate is ignored
        .   * @param outSize size of the output image in pixels.
        .   * @param img output image with the board. The size of this image will be outSize
        .   * and the board will be on the center, keeping the board proportions.
        .   * @param marginSize minimum margins (in pixels) of the board in the output image
        .   * @param borderBits width of the marker borders.
        .   *
        .   * This function returns the image of a planar board, ready to be printed. It assumes
        .   * the Board layout specified is planar by ignoring the z coordinates of the object points.

    estimatePoseBoard(...)
        estimatePoseBoard(corners, ids, board, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess]]]) -> retval, rvec, tvec
        .   * @brief Pose estimation for a board of markers
        .   *
        .   * @param corners vector of already detected markers corners. For each marker, its four corners
        .   * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
        .   * dimensions of this array should be Nx4. The order of the corners should be clockwise.
        .   * @param ids list of identifiers for each marker in corners
        .   * @param board layout of markers in the board. The layout is composed by the marker identifiers
        .   * and the positions of each marker corner in the board reference system.
        .   * @param cameraMatrix input 3x3 floating-point camera matrix
        .   * A = [[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]]
        .   * @param distCoeffs vector of distortion coefficients
        .   * (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6][, s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements
        .   * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
        .   * (see cv::Rodrigues). Used as initial guess if not empty.
        .   * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
        .   * @param useExtrinsicGuess defines whether the initial guess for rvec and tvec will be used or not.
        .   *
        .   * This function receives the detected markers and returns the pose of a marker board composed
        .   * by those markers.
        .   * A Board of marker has a single world coordinate system which is defined by the board layout.
        .   * The returned transformation is the one that transforms points from the board coordinate system
        .   * to the camera coordinate system.
        .   * Input markers that are not included in the board layout are ignored.
        .   * The function returns the number of markers from the input employed for the board pose estimation.
        .   * Note that returning a 0 means the pose has not been estimated.
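
To make this concrete, here is a minimal sketch (not taken from the notebooks) that builds a grid board and estimates its pose from one image; the image file name and calibration values are placeholder assumptions:

import numpy as np
import cv2
from cv2 import aruco

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
# hypothetical board: 5x7 markers, 4 cm sides, 1 cm separation
board = aruco.GridBoard_create(5, 7, 0.04, 0.01, aruco_dict)

# placeholder calibration; use mtx/dist from a real calibration instead
mtx = np.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])
dist = np.zeros((5, 1))

gray = cv2.cvtColor(cv2.imread("board.jpg"), cv2.COLOR_BGR2GRAY)  # hypothetical image
corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict)
if ids is not None:
    # retval counts the markers used; 0 means no pose was estimated
    nused, rvec, tvec = aruco.estimatePoseBoard(corners, ids, board, mtx, dist)
    if nused > 0:
        print("board pose from", nused, "markers, tvec =", tvec.ravel())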

    estimatePoseCharucoBoard(...)
        estimatePoseCharucoBoard(charucoCorners, charucoIds, board, cameraMatrix, distCoeffs[, rvec[, tvec[, useExtrinsicGuess]]]) -> retval, rvec, tvec
        .   * @brief Pose estimation for a ChArUco board given some of their corners
        .   * @param charucoCorners vector of detected charuco corners
        .   * @param charucoIds list of identifiers for each corner in charucoCorners
        .   * @param board layout of ChArUco board.
        .   * @param cameraMatrix input 3x3 floating-point camera matrix
        .   * A = [[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]]
        .   * @param distCoeffs vector of distortion coefficients
        .   * (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6][, s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements
        .   * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
        .   * (see cv::Rodrigues).
        .   * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
        .   * @param useExtrinsicGuess defines whether the initial guess for rvec and tvec will be used or not.
        .   *
        .   * This function estimates a Charuco board pose from some detected corners.
        .   * The function checks if the input corners are enough and valid to perform pose estimation.
        .   * If pose estimation is valid, returns true, else returns false.

    estimatePoseSingleMarkers(...)
        estimatePoseSingleMarkers(corners, markerLength, cameraMatrix, distCoeffs[, rvecs[, tvecs[, _objPoints]]]) -> rvecs, tvecs, _objPoints
        .   * @brief Pose estimation for single markers
        .   *
        .   * @param corners vector of already detected markers corners. For each marker, its four corners
        .   * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
        .   * the dimensions of this array should be Nx4. The order of the corners should be clockwise.
        .   * @sa detectMarkers
        .   * @param markerLength the length of the markers' side. The returning translation vectors will
        .   * be in the same unit. Normally, unit is meters.
        .   * @param cameraMatrix input 3x3 floating-point camera matrix
        .   * A = [[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]]
        .   * @param distCoeffs vector of distortion coefficients
        .   * (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6][, s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements
        .   * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Vec3d>).
        .   * Each element in rvecs corresponds to the specific marker in imgPoints.
        .   * @param tvecs array of output translation vectors (e.g. std::vector<cv::Vec3d>).
        .   * Each element in tvecs corresponds to the specific marker in imgPoints.
        .   * @param _objPoints array of object points of all the marker corners
        .   *
        .   * This function receives the detected markers and returns their pose estimation with respect to
        .   * the camera individually. So for each marker, one rotation and translation vector is returned.
        .   * The returned transformation is the one that transforms points from each marker coordinate system
        .   * to the camera coordinate system.
        .   * The marker coordinate system is centered on the middle of the marker, with the Z axis
        .   * perpendicular to the marker plane.
        .   * The coordinates of the four corners of the marker in its own coordinate system are:
        .   * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
        .   * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)

    getBoardObjectAndImagePoints(...)
        getBoardObjectAndImagePoints(board, detectedCorners, detectedIds[, objPoints[, imgPoints]]) -> objPoints, imgPoints
        .   * @brief Given a board configuration and a set of detected markers, returns the corresponding
        .   * image points and object points to call solvePnP
        .   *
        .   * @param board Marker board layout.
        .   * @param detectedCorners List of detected marker corners of the board.
        .   * @param detectedIds List of identifiers for each marker.
        .   * @param objPoints Vector of vectors of board marker points in the board coordinate space.
        .   * @param imgPoints Vector of vectors of the projections of board marker corner points.

    getPredefinedDictionary(...)
        getPredefinedDictionary(dict) -> retval
        .   * @brief Returns one of the predefined dictionaries referenced by DICT_*.

    interpolateCornersCharuco(...)
        interpolateCornersCharuco(markerCorners, markerIds, image, board[, charucoCorners[, charucoIds[, cameraMatrix[, distCoeffs[, minMarkers]]]]]) -> retval, charucoCorners, charucoIds
        .   * @brief Interpolate position of ChArUco board corners
        .   * @param markerCorners vector of already detected markers corners. For each marker, its four
        .   * corners are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
        .   * dimensions of this array should be Nx4. The order of the corners should be clockwise.
        .   * @param markerIds list of identifiers for each marker in corners
        .   * @param image input image necessary for corner refinement. Note that markers are not detected and
        .   * should be passed in the corners and ids parameters.
        .   * @param board layout of ChArUco board.
        .   * @param charucoCorners interpolated chessboard corners
        .   * @param charucoIds interpolated chessboard corners identifiers
        .   * @param cameraMatrix optional 3x3 floating-point camera matrix
        .   * A = [[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]]
        .   * @param distCoeffs optional vector of distortion coefficients
        .   * (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6][, s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements
        .   * @param minMarkers number of adjacent markers that must be detected to return a charuco corner
        .   *
        .   * This function receives the detected markers and returns the 2D position of the chessboard corners
        .   * from a ChArUco board using the detected Aruco markers. If camera parameters are provided,
        .   * the process is based on an approximated pose estimation, else it is based on local homography.
        .   * Only visible corners are returned. For each corner, its corresponding identifier is
        .   * also returned in charucoIds.
        .   * The function returns the number of interpolated corners.

    refineDetectedMarkers(...)
        refineDetectedMarkers(image, board, detectedCorners, detectedIds, rejectedCorners[, cameraMatrix[, distCoeffs[, minRepDistance[, errorCorrectionRate[, checkAllOrders[, recoveredIdxs[, parameters]]]]]]]) -> detectedCorners, detectedIds, rejectedCorners, recoveredIdxs
        .   * @brief Re-find markers that were not detected, based on the already detected ones and the board layout
        .   *
        .   * @param image input image
        .   * @param board layout of markers in the board.
        .   * @param detectedCorners vector of already detected marker corners.
        .   * @param detectedIds vector of already detected marker identifiers.
        .   * @param rejectedCorners vector of rejected candidates during the marker detection process.
        .   * @param cameraMatrix optional input 3x3 floating-point camera matrix
        .   * A = [[f_x, 0, c_x], [0, f_y, c_y], [0, 0, 1]]
        .   * @param distCoeffs optional vector of distortion coefficients
        .   * (k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6][, s_1, s_2, s_3, s_4]]) of 4, 5, 8 or 12 elements
        .   * @param minRepDistance minimum distance between the corners of the rejected candidate and the
        .   * reprojected marker in order to consider it as a correspondence.
        .   * @param errorCorrectionRate rate of allowed erroneous bits with respect to the error correction
        .   * capability of the used dictionary. -1 ignores the error correction step.
        .   * @param checkAllOrders Consider the four possible corner orders in the rejectedCorners array.
        .   * If set to false, only the provided corner order is considered (default true).
        .   * @param recoveredIdxs Optional array to return the indices of the recovered candidates in the
        .   * original rejectedCorners array.
        .   * @param parameters marker detection parameters
        .   *
        .   * This function tries to find markers that were not detected in the basic detectMarkers function.
        .   * First, based on the current detected marker and the board layout, the function interpolates
        .   * the position of the missing markers. Then it tries to find correspondence between the reprojected
        .   * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate
        .   * parameters.
        .   * If camera parameters and distortion coefficients are provided, missing markers are reprojected
        .   * using the projectPoints function. If not, missing marker projections are interpolated using global
        .   * homography, and all the marker corners in the board must have the same Z coordinate.

DATA
    CORNER_REFINE_CONTOUR = 2
    CORNER_REFINE_NONE = 0
    CORNER_REFINE_SUBPIX = 1
    DICT_4X4_100 = 1
    DICT_4X4_1000 = 3
    DICT_4X4_250 = 2
    DICT_4X4_50 = 0
    DICT_5X5_100 = 5
    DICT_5X5_1000 = 7
    DICT_5X5_250 = 6
    DICT_5X5_50 = 4
    DICT_6X6_100 = 9
    DICT_6X6_1000 = 11
    DICT_6X6_250 = 10
    DICT_6X6_50 = 8
    DICT_7X7_100 = 13
    DICT_7X7_1000 = 15
    DICT_7X7_250 = 14
    DICT_7X7_50 = 12
    DICT_ARUCO_ORIGINAL = 16

FILE
    (built-in)
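
Putting the reference above together, a minimal single-marker pose pipeline looks like the sketch below (the image name and calibration values are placeholders, not the notebooks' own data):

import numpy as np
import cv2
from cv2 import aruco

aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters = aruco.DetectorParameters_create()

mtx = np.array([[1000., 0., 960.], [0., 1000., 540.], [0., 0., 1.]])  # placeholder
dist = np.zeros((5, 1))                                               # placeholder

frame = cv2.imread("scene.jpg")  # hypothetical image
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
if ids is not None:
    # 2 cm markers: the returned tvecs are then in meters too
    rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, 0.02, mtx, dist)
    out = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
    for rvec, tvec in zip(rvecs, tvecs):
        out = aruco.drawAxis(out, mtx, dist, rvec, tvec, 0.01)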
help(aruco.calibrateCameraCharuco)
Help on built-in function calibrateCameraCharuco:

calibrateCameraCharuco(...)
    calibrateCameraCharuco(charucoCorners, charucoIds, board, imageSize, cameraMatrix, distCoeffs[, rvecs[, tvecs[, flags[, criteria]]]]) -> retval, cameraMatrix, distCoeffs, rvecs, tvecs
    .   @brief It's the same function as #calibrateCameraCharucoExtended but without calibration error estimation.

Note

This notebook can be downloaded here: aruco_basics.ipynb

ARUCO markers: basics
1: Marker creation
import numpy as np
import cv2, PIL
from cv2 import aruco
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
%matplotlib nbagg
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

fig = plt.figure()
nx = 4
ny = 3
for i in range(1, nx*ny+1):
    ax = fig.add_subplot(ny,nx, i)
    img = aruco.drawMarker(aruco_dict,i, 700)
    plt.imshow(img, cmap = mpl.cm.gray, interpolation = "nearest")
    ax.axis("off")

plt.savefig("_data/markers.pdf")
plt.show()
3: Post processing
%%time
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters =  aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
CPU times: user 420 ms, sys: 20 ms, total: 440 ms
Wall time: 172 ms

Pretty fast processing!

4: Results
plt.figure()
plt.imshow(frame_markers)
for i in range(len(ids)):
    c = corners[i][0]
    plt.plot([c[:, 0].mean()], [c[:, 1].mean()], "o", label = "id={0}".format(ids[i]))
plt.legend()
plt.show()
def quad_area(data):
    # area of each marker quadrilateral: split the quad into two triangles
    # along the diagonal c1-c3 and sum their signed areas (2D cross products)
    l = data.shape[0]//2
    corners = data[["c1", "c2", "c3", "c4"]].values.reshape(l, 2,4)
    c1 = corners[:, :, 0]
    c2 = corners[:, :, 1]
    c3 = corners[:, :, 2]
    c4 = corners[:, :, 3]
    # edge vectors around the quadrilateral
    e1 = c2-c1
    e2 = c3-c2
    e3 = c4-c3
    e4 = c1-c4
    a = -.5 * (np.cross(-e1, e2, axis = 1) + np.cross(-e3, e4, axis = 1))
    return a
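
The cross products above are the shoelace rule applied to the two triangles obtained by splitting each quadrilateral along the diagonal c1-c3. As a quick sanity check (reusing the notebook's numpy import), a counter-clockwise unit square should give an area of 1:

square = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])  # corners c1..c4
e1, e2 = square[1] - square[0], square[2] - square[1]
e3, e4 = square[3] - square[2], square[0] - square[3]
print(-.5 * (np.cross(-e1, e2) + np.cross(-e3, e4)))  # 1.0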

corners2 = np.array([c[0] for c in corners])

data = pd.DataFrame({"x": corners2[:,:,0].flatten(), "y": corners2[:,:,1].flatten()},
                   index = pd.MultiIndex.from_product(
                           [ids.flatten(), ["c{0}".format(i )for i in np.arange(4)+1]],
                       names = ["marker", ""] ))

data = data.unstack().swaplevel(0, 1, axis = 1).stack()
data["m1"] = data[["c1", "c2"]].mean(axis = 1)
data["m2"] = data[["c2", "c3"]].mean(axis = 1)
data["m3"] = data[["c3", "c4"]].mean(axis = 1)
data["m4"] = data[["c4", "c1"]].mean(axis = 1)
data["o"] = data[["m1", "m2", "m3", "m4"]].mean(axis = 1)
data
c1 c2 c3 c4 m1 m2 m3 m4 o
marker
1 x 3114.0 2701.0 2467.0 2876.0 2907.5 2584.0 2671.5 2995.0 2789.50
y 1429.0 1597.0 1168.0 1019.0 1513.0 1382.5 1093.5 1224.0 1303.25
2 x 2593.0 2152.0 1939.0 2363.0 2372.5 2045.5 2151.0 2478.0 2261.75
y 1635.0 1804.0 1352.0 1209.0 1719.5 1578.0 1280.5 1422.0 1500.00
3 x 2037.0 1533.0 1350.0 1826.0 1785.0 1441.5 1588.0 1931.5 1686.50
y 1848.0 2032.0 1518.0 1381.0 1940.0 1775.0 1449.5 1614.5 1694.75
4 x 1409.0 822.0 670.0 1231.0 1115.5 746.0 950.5 1320.0 1033.00
y 2076.0 2281.0 1712.0 1553.0 2178.5 1996.5 1632.5 1814.5 1905.50
5 x 2820.0 2415.0 2217.0 2614.0 2617.5 2316.0 2415.5 2717.0 2516.50
y 924.0 1071.0 686.0 550.0 997.5 878.5 618.0 737.0 807.75
6 x 2316.0 1883.0 1705.0 2121.0 2099.5 1794.0 1913.0 2218.5 2006.25
y 1105.0 1248.0 860.0 720.0 1176.5 1054.0 790.0 912.5 983.25
7 x 1779.0 1311.0 1154.0 1603.0 1545.0 1232.5 1378.5 1691.0 1461.75
y 1279.0 1409.0 989.0 886.0 1344.0 1199.0 937.5 1082.5 1140.75
8 x 1193.0 640.0 525.0 1039.0 916.5 582.5 782.0 1116.0 849.25
y 1439.0 1592.0 1133.0 1013.0 1515.5 1362.5 1073.0 1226.0 1294.25
9 x 2561.0 2173.0 1998.0 2374.0 2367.0 2085.5 2186.0 2467.5 2276.50
y 464.0 598.0 272.0 146.0 531.0 435.0 209.0 305.0 370.00
10 x 2068.0 1667.0 1519.0 1902.0 1867.5 1593.0 1710.5 1985.0 1789.00
y 628.0 762.0 428.0 309.0 695.0 595.0 368.5 468.5 531.75
11 x 1563.0 1119.0 987.0 1411.0 1341.0 1053.0 1199.0 1487.0 1270.00
y 797.0 896.0 543.0 449.0 846.5 719.5 496.0 623.0 671.25
12 x 1008.0 501.0 407.0 881.0 754.5 454.0 644.0 944.5 699.25
y 920.0 1033.0 651.0 563.0 976.5 842.0 607.0 741.5 791.75

Note

This notebook can be downloaded here: aruco_basics_video.ipynb

ARUCO markers: basics
1: Marker creation
import numpy as np
import cv2, PIL
from cv2 import aruco
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
%matplotlib nbagg
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)

fig = plt.figure()
nx = 4
ny = 3
for i in range(1, nx*ny+1):
    ax = fig.add_subplot(ny,nx, i)
    img = aruco.drawMarker(aruco_dict,i, 700)
    plt.imshow(img, cmap = mpl.cm.gray, interpolation = "nearest")
    ax.axis("off")

plt.savefig("_data/markers.jpeg")
plt.show()
3: Post processing
%%time
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters =  aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
Wall time: 178 ms
rejectedImgPoints[1]
array([[[ 1213.,  1229.],
        [ 1217.,  1221.],
        [ 1259.,  1224.],
        [ 1256.,  1229.]]], dtype=float32)
corners
[array([[[ 1339.,   951.],
         [ 1413.,   934.],
         [ 1434.,   981.],
         [ 1358.,   999.]]], dtype=float32), array([[[ 2247.,  1604.],
         [ 2306.,  1653.],
         [ 2263.,  1691.],
         [ 2203.,  1643.]]], dtype=float32), array([[[ 2071.,  1279.],
         [ 2101.,  1233.],
         [ 2162.,  1267.],
         [ 2132.,  1314.]]], dtype=float32), array([[[ 1209.,  1217.],
         [ 1297.,  1218.],
         [ 1290.,  1287.],
         [ 1201.,  1286.]]], dtype=float32), array([[[ 1507.,  1244.],
         [ 1510.,  1309.],
         [ 1421.,  1313.],
         [ 1419.,  1245.]]], dtype=float32), array([[[  940.,  1212.],
         [  933.,  1282.],
         [  840.,  1285.],
         [  849.,  1216.]]], dtype=float32), array([[[ 2736.,  1132.],
         [ 2764.,  1183.],
         [ 2723.,  1241.],
         [ 2701.,  1191.]]], dtype=float32), array([[[ 1140.,  1120.],
         [ 1129.,  1059.],
         [ 1214.,  1048.],
         [ 1226.,  1108.]]], dtype=float32), array([[[  990.,  1050.],
         [  906.,  1071.],
         [  885.,  1013.],
         [  968.,   993.]]], dtype=float32), array([[[ 1586.,   950.],
         [ 1513.,   929.],
         [ 1543.,   879.],
         [ 1616.,   899.]]], dtype=float32)]

Pretty fast processing!

4: Results
plt.figure()
plt.imshow(frame_markers, origin = "upper")
if ids is not None:
    for i in range(len(ids)):
        c = corners[i][0]
        plt.plot([c[:, 0].mean()], [c[:, 1].mean()], "+", label = "id={0}".format(ids[i]))
"""for points in rejectedImgPoints:
    y = points[:, 0]
    x = points[:, 1]
    plt.plot(x, y, ".m-", linewidth = 1.)"""
plt.legend()
plt.show()
def quad_area(data):
    l = data.shape[0]//2
    corners = data[["c1", "c2", "c3", "c4"]].values.reshape(l, 2,4)
    c1 = corners[:, :, 0]
    c2 = corners[:, :, 1]
    c3 = corners[:, :, 2]
    c4 = corners[:, :, 3]
    e1 = c2-c1
    e2 = c3-c2
    e3 = c4-c3
    e4 = c1-c4
    a = -.5 * (np.cross(-e1, e2, axis = 1) + np.cross(-e3, e4, axis = 1))
    return a

corners2 = np.array([c[0] for c in corners])

data = pd.DataFrame({"x": corners2[:,:,0].flatten(), "y": corners2[:,:,1].flatten()},
                   index = pd.MultiIndex.from_product(
                           [ids.flatten(), ["c{0}".format(i )for i in np.arange(4)+1]],
                       names = ["marker", ""] ))

data = data.unstack().swaplevel(0, 1, axis = 1).stack()
data["m1"] = data[["c1", "c2"]].mean(axis = 1)
data["m2"] = data[["c2", "c3"]].mean(axis = 1)
data["m3"] = data[["c3", "c4"]].mean(axis = 1)
data["m4"] = data[["c4", "c1"]].mean(axis = 1)
data["o"] = data[["m1", "m2", "m3", "m4"]].mean(axis = 1)
data
c1 c2 c3 c4 m1 m2 m3 m4 o
marker
1 x 1209.0 1297.0 1290.0 1201.0 1253.0 1293.5 1245.5 1205.0 1249.25
y 1217.0 1218.0 1287.0 1286.0 1217.5 1252.5 1286.5 1251.5 1252.00
3 x 2736.0 2764.0 2723.0 2701.0 2750.0 2743.5 2712.0 2718.5 2731.00
y 1132.0 1183.0 1241.0 1191.0 1157.5 1212.0 1216.0 1161.5 1186.75
4 x 1140.0 1129.0 1214.0 1226.0 1134.5 1171.5 1220.0 1183.0 1177.25
y 1120.0 1059.0 1048.0 1108.0 1089.5 1053.5 1078.0 1114.0 1083.75
5 x 2071.0 2101.0 2162.0 2132.0 2086.0 2131.5 2147.0 2101.5 2116.50
y 1279.0 1233.0 1267.0 1314.0 1256.0 1250.0 1290.5 1296.5 1273.25
6 x 1507.0 1510.0 1421.0 1419.0 1508.5 1465.5 1420.0 1463.0 1464.25
y 1244.0 1309.0 1313.0 1245.0 1276.5 1311.0 1279.0 1244.5 1277.75
7 x 2247.0 2306.0 2263.0 2203.0 2276.5 2284.5 2233.0 2225.0 2254.75
y 1604.0 1653.0 1691.0 1643.0 1628.5 1672.0 1667.0 1623.5 1647.75
9 x 940.0 933.0 840.0 849.0 936.5 886.5 844.5 894.5 890.50
y 1212.0 1282.0 1285.0 1216.0 1247.0 1283.5 1250.5 1214.0 1248.75
10 x 990.0 906.0 885.0 968.0 948.0 895.5 926.5 979.0 937.25
y 1050.0 1071.0 1013.0 993.0 1060.5 1042.0 1003.0 1021.5 1031.75
11 x 1339.0 1413.0 1434.0 1358.0 1376.0 1423.5 1396.0 1348.5 1386.00
y 951.0 934.0 981.0 999.0 942.5 957.5 990.0 975.0 966.25
12 x 1586.0 1513.0 1543.0 1616.0 1549.5 1528.0 1579.5 1601.0 1564.50
y 950.0 929.0 879.0 899.0 939.5 904.0 889.0 924.5 914.25
# Crashes a bit...
"""cap = cv2.VideoCapture('_data/AeroTrain.mp4')
while(cap.isOpened()):
    ret, frame = cap.read()

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    cv2.imshow('frame',gray)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()"""
"cap = cv2.VideoCapture('_data/AeroTrain.mp4')nwhile(cap.isOpened()):n    ret, frame = cap.read()nn    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)nn    cv2.imshow('frame',gray)n    if cv2.waitKey(1) & 0xFF == ord('q'):n        breaknncap.release()ncv2.destroyAllWindows()"
cap = cv2.VideoCapture('_data/AeroTrain.mp4')
nframe = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

print("nframe =", nframe)
cap.set(cv2.CAP_PROP_POS_FRAMES, 300) # arguments: property id (frame position), value (frame number)
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
plt.figure()
plt.imshow(gray)
plt.show()
cap.release()
nframe = 712
%%time
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters =  aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)
Wall time: 31.3 ms
plt.figure()
plt.imshow(frame_markers, origin = "upper")
if ids is not None:
    for i in range(len(ids)):
        c = corners[i][0]
        plt.plot([c[:, 0].mean()], [c[:, 1].mean()], "+", label = "id={0}".format(ids[i]))
"""for points in rejectedImgPoints:
    y = points[:, 0]
    x = points[:, 1]
    plt.plot(x, y, ".m-", linewidth = 1.)"""
plt.legend()
plt.show()
help(aruco.DetectorParameters_create)
Help on built-in function DetectorParameters_create:

DetectorParameters_create(...)
    DetectorParameters_create() -> retval
    .
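
The returned DetectorParameters object holds the tuning knobs used by detectMarkers; in particular, the CORNER_REFINE_* constants listed in the DATA section above select the corner refinement method. A small sketch (attribute names as exposed by this cv2.aruco version; gray and aruco_dict as defined earlier):

parameters = aruco.DetectorParameters_create()
# ask for subpixel corner refinement (see CORNER_REFINE_* above)
parameters.cornerRefinementMethod = aruco.CORNER_REFINE_SUBPIX
# widen the adaptive-threshold window sweep used to binarize the image
parameters.adaptiveThreshWinSizeMax = 53
corners, ids, rejected = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)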
Sandbox
Ludovic

Note

This notebook can be downloaded here: aruco_calibration_rotation.ipynb

Camera calibration using CHARUCO
import numpy as np
import cv2, PIL, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
%matplotlib nbagg
2. Camera pose estimation using CHARUCO chessboard

First, let’s create the board.

workdir = "./workdir/"
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
board = aruco.CharucoBoard_create(7, 5, 1, .8, aruco_dict)
imboard = board.draw((2000, 2000))
cv2.imwrite(workdir + "chessboard.tiff", imboard)
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.imshow(imboard, cmap = mpl.cm.gray, interpolation = "nearest")
ax.axis("off")
plt.show()

And take photos of it from multiple angles, for example:

datadir = "../../data/calib_tel_ludo/"
images = np.array([datadir + f for f in os.listdir(datadir) if f.endswith(".png") ])
order = np.argsort([int(p.split(".")[-2].split("_")[-1]) for p in images])
images = images[order]
images
array(['../../data/calib_tel_ludo/VID_20180406_085421_0.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_5.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_10.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_15.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_20.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_25.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_30.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_35.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_40.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_45.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_50.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_55.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_60.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_65.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_70.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_75.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_80.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_85.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_90.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_95.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_100.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_105.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_110.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_115.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_120.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_125.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_130.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_135.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_140.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_145.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_150.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_155.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_160.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_165.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_170.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_175.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_180.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_185.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_190.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_195.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_200.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_205.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_210.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_215.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_220.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_225.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_230.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_235.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_240.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_245.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_250.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_255.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_260.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_265.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_270.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_275.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_280.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_285.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_290.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_295.png',
       '../../data/calib_tel_ludo/VID_20180406_085421_300.png'],
      dtype='<U53')
im = PIL.Image.open(images[0])
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
plt.imshow(im)
#ax.axis('off')
plt.show()

Now, the camera calibration can be done using all the images of the chessboard. Two functions are necessary:

  • The first detects the ChArUco corners on all the images.
  • The second processes the detected corners to estimate the camera calibration parameters.
def read_chessboards(images):
    """
    Charuco base pose estimation.
    """
    print("POSE ESTIMATION STARTS:")
    allCorners = []
    allIds = []
    decimator = 0
    # SUB PIXEL CORNER DETECTION CRITERION
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.00001)

    for im in images:
        print("=> Processing image {0}".format(im))
        frame = cv2.imread(im)
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        corners, ids, rejectedImgPoints = cv2.aruco.detectMarkers(gray, aruco_dict)

        if len(corners)>0:
            # SUB PIXEL DETECTION
            for corner in corners:
                cv2.cornerSubPix(gray, corner,
                                 winSize = (3,3),
                                 zeroZone = (-1,-1),
                                 criteria = criteria)
            res2 = cv2.aruco.interpolateCornersCharuco(corners,ids,gray,board)
            if res2[1] is not None and res2[2] is not None and len(res2[1])>3 and decimator%1==0:
                allCorners.append(res2[1])
                allIds.append(res2[2])

        decimator+=1

    imsize = gray.shape
    return allCorners,allIds,imsize
allCorners,allIds,imsize=read_chessboards(images)
POSE ESTIMATION STARTS:
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_0.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_5.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_10.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_15.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_20.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_25.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_30.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_35.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_40.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_45.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_50.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_55.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_60.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_65.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_70.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_75.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_80.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_85.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_90.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_95.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_100.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_105.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_110.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_115.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_120.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_125.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_130.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_135.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_140.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_145.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_150.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_155.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_160.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_165.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_170.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_175.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_180.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_185.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_190.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_195.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_200.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_205.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_210.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_215.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_220.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_225.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_230.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_235.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_240.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_245.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_250.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_255.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_260.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_265.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_270.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_275.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_280.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_285.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_290.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_295.png
=> Processing image ../../data/calib_tel_ludo/VID_20180406_085421_300.png
def calibrate_camera(allCorners,allIds,imsize):
    """
    Calibrates the camera using the detected corners.
    """
    print("CAMERA CALIBRATION")

    # imsize = gray.shape = (height, width), so cx = width/2 and cy = height/2
    cameraMatrixInit = np.array([[ 1000.,    0., imsize[1]/2.],
                                 [    0., 1000., imsize[0]/2.],
                                 [    0.,    0.,           1.]])

    distCoeffsInit = np.zeros((5,1))
    flags = (cv2.CALIB_USE_INTRINSIC_GUESS + cv2.CALIB_RATIONAL_MODEL + cv2.CALIB_FIX_ASPECT_RATIO)
    #flags = (cv2.CALIB_RATIONAL_MODEL)
    (ret, camera_matrix, distortion_coefficients0,
     rotation_vectors, translation_vectors,
     stdDeviationsIntrinsics, stdDeviationsExtrinsics,
     perViewErrors) = cv2.aruco.calibrateCameraCharucoExtended(
                      charucoCorners=allCorners,
                      charucoIds=allIds,
                      board=board,
                      imageSize=imsize,
                      cameraMatrix=cameraMatrixInit,
                      distCoeffs=distCoeffsInit,
                      flags=flags,
                      criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 10000, 1e-9))

    return ret, camera_matrix, distortion_coefficients0, rotation_vectors, translation_vectors
%time ret, mtx, dist, rvecs, tvecs = calibrate_camera(allCorners,allIds,imsize)
CAMERA CALIBRATION
CPU times: user 10.3 s, sys: 8.89 s, total: 19.2 s
Wall time: 5.26 s
ret
0.6363938527748627
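
This return value is the RMS reprojection error of the calibration, in pixels; a value below one pixel, as obtained here, indicates a good fit.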
mtx
array([[1.78952655e+03, 0.00000000e+00, 9.69572430e+02],
       [0.00000000e+00, 1.78952655e+03, 5.64872516e+02],
       [0.00000000e+00, 0.00000000e+00, 1.00000000e+00]])
dist
array([[ 5.33659854e+00],
       [-1.67904382e+02],
       [ 3.32943561e-03],
       [-4.67385863e-03],
       [ 9.75622127e+02],
       [ 5.14691206e+00],
       [-1.66105367e+02],
       [ 9.69643912e+02],
       [ 0.00000000e+00],
       [ 0.00000000e+00],
       [ 0.00000000e+00],
       [ 0.00000000e+00],
       [ 0.00000000e+00],
       [ 0.00000000e+00]])
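
Since CALIB_RATIONAL_MODEL was set, the distortion vector has the extended layout (k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4, taux, tauy); the thin-prism and tilt coefficients stay at zero because they were not estimated.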
Check calibration results
i=20 # select image id
plt.figure()
frame = cv2.imread(images[i])
img_undist = cv2.undistort(frame,mtx,dist,None)
plt.subplot(1,2,1)
plt.imshow(frame)
plt.title("Raw image")
plt.axis("off")
plt.subplot(1,2,2)
plt.imshow(img_undist)
plt.title("Corrected image")
plt.axis("off")
plt.show()
3. Use of camera calibration to estimate the 3D translation and rotation of each marker in a scene
frame = cv2.imread("../../data/IMG_20180406_095219.jpg")
#frame = cv2.undistort(src = frame, cameraMatrix = mtx, distCoeffs = dist)
plt.figure()
plt.imshow(frame, interpolation = "nearest")
plt.show()
Post processing
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
aruco_dict = aruco.Dictionary_get(aruco.DICT_6X6_250)
parameters =  aruco.DetectorParameters_create()
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict,
                                                      parameters=parameters)
# SUB PIXEL DETECTION
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.0001)
for corner in corners:
    cv2.cornerSubPix(gray, corner, winSize = (3,3), zeroZone = (-1,-1), criteria = criteria)

frame_markers = aruco.drawDetectedMarkers(frame.copy(), corners, ids)

corners
[array([[[1211.    , 1744.    ],
         [1002.    , 1678.    ],
         [1095.    , 1553.    ],
         [1298.5002, 1611.7025]]], dtype=float32),
 array([[[1067.8948, 1503.2638],
         [ 880.    , 1447.    ],
         [ 971.8308, 1339.5516],
         [1155.9335, 1390.4458]]], dtype=float32),
 array([[[1589., 2408.],
         [1330., 2308.],
         [1423., 2120.],
         [1671., 2208.]]], dtype=float32),
 array([[[2033., 2261.],
         [1772., 2174.],
         [1835., 2005.],
         [2083., 2083.]]], dtype=float32),
 array([[[ 935., 2158.],
         [ 706., 2076.],
         [ 827., 1911.],
         [1046., 1986.]]], dtype=float32),
 array([[[1378., 2036.],
         [1153., 1957.],
         [1245., 1810.],
         [1460., 1882.]]], dtype=float32),
 array([[[ 348., 1942.],
         [ 144., 1867.],
         [ 291., 1725.],
         [ 484., 1792.]]], dtype=float32),
 array([[[1782., 1928.],
         [1556., 1853.],
         [1624., 1717.],
         [1839., 1783.]]], dtype=float32),
 array([[[ 798., 1837.],
         [ 597., 1770.],
         [ 713., 1637.],
         [ 906., 1700.]]], dtype=float32),
 array([[[2154., 1823.],
         [1930., 1755.],
         [1977., 1630.],
         [2188., 1691.]]], dtype=float32),
 array([[[1580., 1650.],
         [1381., 1590.],
         [1449., 1482.],
         [1641., 1535.]]], dtype=float32),
 array([[[ 273., 1658.],
         [  98., 1592.],
         [ 231., 1478.],
         [ 403., 1539.]]], dtype=float32),
 array([[[ 688., 1574.],
         [ 509., 1517.],
         [ 617., 1412.],
         [ 790., 1465.]]], dtype=float32),
 array([[[1415.4037, 1431.0923],
         [1225.0386, 1377.4968],
         [1300.2623, 1279.6166],
         [1483.125 , 1329.8298]]], dtype=float32),
 array([[[ 597.94867, 1363.2643 ],
         [ 421.2595 , 1307.9504 ],
         [ 535.3967 , 1211.2885 ],
         [ 704.76355, 1262.5137 ]]], dtype=float32),
 array([[[ 949.5966, 1301.637 ],
         [ 775.0423, 1250.5741],
         [ 867.6455, 1160.6498],
         [1035.8293, 1207.2653]]], dtype=float32),
 array([[[1929.4287, 1575.4489],
         [1717.393 , 1515.709 ],
         [1772.9988, 1407.4595],
         [1975.8889, 1461.9364]]], dtype=float32)]

Very fast processing!

Results
plt.figure()
plt.imshow(frame_markers, interpolation = "nearest")

plt.show()
Add local axis on each marker
size_of_marker =  0.0285 # side length of the marker in meters
rvecs, tvecs, _objPoints = aruco.estimatePoseSingleMarkers(corners, size_of_marker, mtx, dist)
length_of_axis = 0.1
imaxis = aruco.drawDetectedMarkers(frame.copy(), corners, ids)

for i in range(len(tvecs)):
    imaxis = aruco.drawAxis(imaxis, mtx, dist, rvecs[i], tvecs[i], length_of_axis)
plt.figure()
plt.imshow(imaxis)
plt.grid()
plt.show()
data = pd.DataFrame(data = tvecs.reshape(len(tvecs),3), columns = ["tx", "ty", "tz"],
                    index = ids.flatten())
data.index.name = "marker"
data.sort_index(inplace= True)
data
tx ty tz
marker
0 0.058386 0.185638 0.196745
1 -0.010302 0.166097 0.203390
2 -0.080577 0.156345 0.221786
3 0.116058 0.189125 0.216976
4 0.041465 0.165729 0.219531
5 -0.027355 0.148248 0.227437
6 -0.100679 0.140372 0.251642
7 0.097155 0.166912 0.238803
8 0.023141 0.137494 0.228381
9 -0.044928 0.130699 0.253189
10 0.163007 0.172988 0.267740
11 0.078322 0.144059 0.258836
12 0.006806 0.117907 0.247738
13 -0.057502 0.102385 0.255066
14 0.130086 0.136805 0.265516
15 0.056305 0.114980 0.261572
16 -0.009219 0.097941 0.264597
datar = pd.DataFrame(data = rvecs.reshape(len(rvecs),3), columns = ["rx", "ry", "rz"],
                    index = ids.flatten())
datar.index.name = "marker"
datar.sort_index(inplace= True)
np.degrees(datar)
rx ry rz
marker
0 3.345263 10.636263 11.272638
1 -0.590286 9.516639 11.653392
2 -4.616715 8.957911 12.707418
3 6.649625 10.836080 12.431815
4 2.375792 9.495585 12.578208
5 -1.567306 8.493977 13.031201
6 -5.768467 8.042731 14.418019
7 5.566548 9.563349 13.682404
8 1.325893 7.877812 13.085270
9 -2.574157 7.488515 14.506650
10 9.339589 9.911505 15.340350
11 4.487517 8.253984 14.830232
12 0.389962 6.755597 14.194362
13 -3.294606 5.866220 14.614201
14 7.453402 7.838356 15.212937
15 3.226061 6.587856 14.986982
16 -0.528237 5.611610 15.160290
v = data.loc[3:6].values
((v[1:] - v[:-1])**2).sum(axis = 1)**.5
array([0.07821726, 0.07144442, 0.07761642])
cv2.Rodrigues(rvecs[0], np.zeros((3,3)))
(array([[-0.86801078, -0.49450269, -0.04499303],
        [ 0.02324173,  0.05005109, -0.99847619],
        [ 0.49600111, -0.86773382, -0.03195179]]),
 array([[ 0.17008214, -0.35187266,  0.58606527, -0.60094699, -0.15414543,
         -0.02171528,  0.32580607,  0.19163346, -0.14667908],
        [ 0.2919512 , -0.48377297, -0.31537661,  0.54686264, -0.3578951 ,
         -0.00521095,  0.48529479,  0.25504826,  0.60693805],
        [-0.27940432,  0.44000776,  0.55432899, -0.27008959, -0.61042134,
         -0.03688581, -0.47630662, -0.28596013,  0.37208124]]))
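
cv2.Rodrigues converts the rotation vector into the corresponding 3x3 rotation matrix (first array) and also returns the 3x9 Jacobian of the conversion (second array).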
fig = plt.figure()
#ax = fig.add_subplot(111, projection='3d')
ax = fig.add_subplot(1,2,1)
ax.set_aspect("equal")
plt.plot(data.tx, data.ty, "or-")
plt.grid()
ax = fig.add_subplot(1,2,2)
plt.imshow(imaxis, origin = "upper")
plt.plot(np.array(corners)[:, 0, 0,0], np.array(corners)[:, 0, 0,1], "or")
plt.show()
a = np.arange(50)
a
array([ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14, 15, 16,
       17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
       34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49])
import pickle
f = open("truc.pckl", "wb")
pickle.dump(a, f)
f.close()
f = open("truc.pckl", "rb")
b = pickle.load(f)
b == a
array([ True,  True,  True,  True,  True,  True,  True,  True,  True,
        True,  True,  True,  True,  True,  True,  True,  True,  True,
        True,  True,  True,  True,  True,  True,  True,  True,  True,
        True,  True,  True,  True,  True,  True,  True,  True,  True,
        True,  True,  True,  True,  True,  True,  True,  True,  True,
        True,  True,  True,  True,  True], dtype=bool)
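
The same mechanism can persist the calibration itself, so the other notebooks do not have to recompute it. A minimal sketch (the file name is an arbitrary choice):

import pickle

# save the intrinsic matrix and distortion coefficients
with open("calibration.pckl", "wb") as f:
    pickle.dump({"mtx": mtx, "dist": dist}, f)

# reload them later, e.g. in another notebook
with open("calibration.pckl", "rb") as f:
    calib = pickle.load(f)
mtx, dist = calib["mtx"], calib["dist"]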
corners = np.array(corners)
data2 = pd.DataFrame({"px": corners[:, 0, 0, 1],
                      "py": corners[:, 0, 0, 0]}, index = ids.flatten())
data2.sort_index(inplace=True)
data2
px py
0 177.324295 222.723907
1 174.117722 448.426971
5 165.167435 1385.455933
6 292.872223 348.533112
7 290.211761 572.901550
8 286.861359 800.593140
9 285.043823 1029.405640
10 284.054932 1261.753418
11 406.743347 250.763550
12 405.577484 469.121307
13 402.066681 691.525330
14 398.973602 918.603577
16 397.476105 1371.831177
17 514.600769 374.230682
18 512.135010 590.534302
19 509.453247 809.594849
20 507.959595 1029.593262
21 507.521088 1253.295044
22 615.594482 280.054901
23 614.357056 490.602081
24 613.074951 704.512085
25 611.417297 922.586426
26 611.085632 1139.391602
27 611.036255 1359.634644
28 716.764465 397.975067
29 716.205688 606.338318
30 714.187927 817.897095
31 713.494141 1029.665405
32 713.155762 1244.999390
33 811.479309 305.960754
34 811.358704 509.836670
35 810.507996 716.540955
36 810.085144 926.713257
37 810.013611 1135.423462
38 810.014771 1347.564697
39 906.448242 420.143951
40 906.174988 621.917664
41 905.748413 825.513733
42 906.083923 1029.803955
43 906.387878 1237.707520
m0 = data2.loc[0]
m43 = data2.loc[43]
d01 = ((m0 - m43).values**2).sum()**.5
d = 42.5e-3 * (3.5**2 + 4.5**2)**.5
factor = d / d01
data2["x"] = data2.px * factor
data2["y"] = data2.py * factor
((data2[["x", "y"]].loc[11] - data2[["x", "y"]].loc[0]).values**2).sum()**.5
0.043476117957396747
c = np.array(corners).astype(np.float64).reshape(44,4,2)
(((c[:, 1:] - c[:, :-1])**2).sum(axis = 2)**.5).mean(axis =1)
array([ 138.33575835,  143.00113377,  142.012097  ,  140.69699432,
        146.66782406,  144.02442319,  138.67845434,  142.33812925,
        143.00229095,  140.33926025,  140.35356753,  146.66786569,
        139.34054504,  146.67222201,  140.03570454,  148.01939184,
        143.35647769,  142.67236143,  147.01931296,  148.02127735,
        137.67392157,  135.35308209,  141.00354688,  143.67946992,
        137.67149733,  138.67392207,  145.00112611,  142.33454105,
        138.3466791 ,  143.00234925,  139.0035972 ,  143.00115739,
        143.6865917 ,  144.67964727,  144.33446711,  141.67253496,
        143.67117097,  147.67232772,  150.35663387,  141.70034559,
        149.01342342,  146.01949591,  144.34013329,  150.35333222])
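
The mean edge lengths all fall in a narrow band (roughly 135-150 px), consistent with a board seen almost frontally at a fairly uniform distance.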
c
array([[[ 2406.,  1940.],
        [ 2546.,  1940.],
        [ 2545.,  2075.],
        [ 2405.,  2076.]],

       [[ 1991.,  1938.],
        [ 2138.,  1939.],
        [ 2138.,  2076.],
        [ 1993.,  2076.]],

       [[ 1584.,  1936.],
        [ 1728.,  1936.],
        [ 1731.,  2073.],
        [ 1586.,  2072.]],

       [[ 2619.,  1735.],
        [ 2759.,  1735.],
        [ 2754.,  1878.],
        [ 2615.,  1877.]],

       [[ 2198.,  1734.],
        [ 2347.,  1734.],
        [ 2346.,  1878.],
        [ 2199.,  1878.]],

       [[  973.,  1733.],
        [ 1117.,  1731.],
        [ 1121.,  1874.],
        [  976.,  1875.]],

       [[  572.,  1732.],
        [  710.,  1732.],
        [  713.,  1874.],
        [  577.,  1873.]],

       [[ 2410.,  1533.],
        [ 2554.,  1533.],
        [ 2552.,  1672.],
        [ 2408.,  1672.]],

       [[ 1373.,  1326.],
        [ 1519.,  1325.],
        [ 1519.,  1463.],
        [ 1374.,  1464.]],

       [[ 1785.,  1326.],
        [ 1926.,  1324.],
        [ 1927.,  1463.],
        [ 1786.,  1463.]],

       [[ 2627.,  1323.],
        [ 2767.,  1324.],
        [ 2763.,  1464.],
        [ 2622.,  1464.]],

       [[ 2200.,  1324.],
        [ 2350.,  1324.],
        [ 2349.,  1463.],
        [ 2198.,  1463.]],

       [[  760.,  1128.],
        [  901.,  1127.],
        [  903.,  1265.],
        [  764.,  1266.]],

       [[ 1988.,  1123.],
        [ 2138.,  1121.],
        [ 2138.,  1261.],
        [ 1988.,  1262.]],

       [[  547.,   920.],
        [  687.,   918.],
        [  692.,  1058.],
        [  552.,  1059.]],

       [[ 2203.,   910.],
        [ 2354.,   908.],
        [ 2351.,  1050.],
        [ 2200.,  1052.]],

       [[ 2631.,   908.],
        [ 2775.,   906.],
        [ 2771.,  1050.],
        [ 2629.,  1050.]],

       [[  750.,   708.],
        [  890.,   707.],
        [  892.,   855.],
        [  752.,   855.]],

       [[ 2419.,   695.],
        [ 2565.,   693.],
        [ 2563.,   842.],
        [ 2417.,   845.]],

       [[  946.,   494.],
        [ 1093.,   491.],
        [ 1096.,   642.],
        [  950.,   643.]],

       [[ 1181.,  1936.],
        [ 1319.,  1935.],
        [ 1321.,  2073.],
        [ 1184.,  2072.]],

       [[  780.,  1935.],
        [  916.,  1935.],
        [  920.,  2070.],
        [  785.,  2070.]],

       [[ 1788.,  1731.],
        [ 1928.,  1732.],
        [ 1929.,  1876.],
        [ 1790.,  1875.]],

       [[ 1378.,  1731.],
        [ 1521.,  1730.],
        [ 1524.,  1873.],
        [ 1379.,  1874.]],

       [[  771.,  1533.],
        [  909.,  1533.],
        [  911.,  1671.],
        [  774.,  1671.]],

       [[ 1176.,  1533.],
        [ 1315.,  1532.],
        [ 1317.,  1669.],
        [ 1177.,  1670.]],

       [[ 1989.,  1532.],
        [ 2137.,  1532.],
        [ 2137.,  1671.],
        [ 1989.,  1670.]],

       [[ 1581.,  1531.],
        [ 1726.,  1531.],
        [ 1727.,  1669.],
        [ 1583.,  1669.]],

       [[  560.,  1329.],
        [  700.,  1328.],
        [  703.,  1465.],
        [  565.,  1466.]],

       [[  966.,  1328.],
        [ 1112.,  1327.],
        [ 1113.,  1465.],
        [  968.,  1465.]],

       [[ 1169.,  1127.],
        [ 1309.,  1126.],
        [ 1310.,  1264.],
        [ 1171.,  1265.]],

       [[ 1579.,  1124.],
        [ 1723.,  1123.],
        [ 1723.,  1263.],
        [ 1578.,  1263.]],

       [[ 2415.,  1120.],
        [ 2560.,  1119.],
        [ 2556.,  1261.],
        [ 2412.,  1261.]],

       [[  956.,   919.],
        [ 1103.,   918.],
        [ 1106.,  1058.],
        [  959.,  1059.]],

       [[ 1367.,   917.],
        [ 1514.,   916.],
        [ 1514.,  1056.],
        [ 1368.,  1056.]],

       [[ 1784.,   914.],
        [ 1926.,   912.],
        [ 1926.,  1053.],
        [ 1784.,  1054.]],

       [[ 1160.,   706.],
        [ 1302.,   706.],
        [ 1304.,   854.],
        [ 1163.,   854.]],

       [[ 1574.,   703.],
        [ 1722.,   702.],
        [ 1722.,   850.],
        [ 1575.,   852.]],

       [[ 1991.,   699.],
        [ 2142.,   697.],
        [ 2138.,   847.],
        [ 1988.,   848.]],

       [[  539.,   499.],
        [  677.,   496.],
        [  681.,   644.],
        [  542.,   646.]],

       [[ 1360.,   490.],
        [ 1508.,   488.],
        [ 1510.,   639.],
        [ 1362.,   641.]],

       [[ 1784.,   486.],
        [ 1928.,   483.],
        [ 1926.,   635.],
        [ 1784.,   637.]],

       [[ 2637.,   479.],
        [ 2778.,   480.],
        [ 2776.,   630.],
        [ 2634.,   629.]],

       [[ 2207.,   481.],
        [ 2356.,   478.],
        [ 2356.,   629.],
        [ 2205.,   632.]]])
help(cv2.aruco.detectMarkers)
Help on built-in function detectMarkers:

detectMarkers(...)
    detectMarkers(image, dictionary[, corners[, ids[, parameters[, rejectedImgPoints]]]]) -> corners, ids, rejectedImgPoints

Tools

Note

This notebook can be downloaded here: video_to_image.ipynb

Video to image
import numpy as np
import cv2, PIL, os
from cv2 import aruco
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
%matplotlib nbagg
workdir = "../Aruco/data/calib_tel_ludo2/"
name = "VID_20180406_104312.mp4"
rootname = name.split(".")[0]
cap = cv2.VideoCapture(workdir + name)
counter = 0
each = 10
length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
for i in range(length):
    ret, frame = cap.read()
    if not ret: break # stop if the video ends early
    if i % each == 0: cv2.imwrite(workdir + rootname + "_{0}".format(i) + ".png", frame)

cap.release()
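
One frame out of every each (here 10) is written to disk; increase each to extract fewer, more widely spaced images.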
os.listdir("../Aruco/data/calib_tel_ludo/")
['VID_20180406_085421_210.png',
 'VID_20180406_085421_230.png',
 'VID_20180406_085421_150.png',
 'VID_20180406_085421_160.png',
 'VID_20180406_085421_65.png',
 'VID_20180406_085421_135.png',
 'VID_20180406_085421_0.png',
 'VID_20180406_085421_255.png',
 'VID_20180406_085421_280.png',
 'VID_20180406_085421_85.png',
 'VID_20180406_085421_165.png',
 'VID_20180406_085421_260.png',
 'VID_20180406_085421_100.png',
 'VID_20180406_085421_245.png',
 'VID_20180406_085421_155.png',
 'VID_20180406_085421_185.png',
 'VID_20180406_085421_250.png',
 'VID_20180406_085421_215.png',
 'VID_20180406_085421_5.png',
 'VID_20180406_085421_15.png',
 'VID_20180406_085421_145.png',
 'VID_20180406_085421_70.png',
 'VID_20180406_085421_270.png',
 'VID_20180406_085421_60.png',
 'VID_20180406_085421_235.png',
 'VID_20180406_085421_290.png',
 'VID_20180406_085421_120.png',
 'VID_20180406_085421_95.png',
 'VID_20180406_085421_170.png',
 'VID_20180406_085421_195.png',
 'VID_20180406_085421_50.png',
 'VID_20180406_085421_225.png',
 'VID_20180406_085421.mp4',
 'VID_20180406_085421_190.png',
 'VID_20180406_085421_275.png',
 'VID_20180406_085421_295.png',
 'VID_20180406_085421_30.png',
 'VID_20180406_085421_75.png',
 'VID_20180406_085421_175.png',
 'VID_20180406_085421_200.png',
 'VID_20180406_085421_140.png',
 'VID_20180406_085421_115.png',
 'VID_20180406_085421_10.png',
 'VID_20180406_085421_80.png',
 'VID_20180406_085421_25.png',
 'VID_20180406_085421_130.png',
 'VID_20180406_085421_110.png',
 'VID_20180406_085421_105.png',
 'VID_20180406_085421_40.png',
 'VID_20180406_085421_205.png',
 'VID_20180406_085421_125.png',
 'VID_20180406_085421_35.png',
 'VID_20180406_085421_90.png',
 'VID_20180406_085421_265.png',
 'VID_20180406_085421_240.png',
 'VID_20180406_085421_300.png',
 'VID_20180406_085421_285.png',
 'VID_20180406_085421_55.png',
 'VID_20180406_085421_220.png',
 'VID_20180406_085421_180.png',
 'VID_20180406_085421_45.png',
 'VID_20180406_085421_20.png']
int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
0
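
The frame count reads as 0 here because the capture was released above: cap.get only returns meaningful values while the VideoCapture is still open.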