Development of the OCR part of AOI
Samo Penic
2018-11-21 5460bf601a854c842342a740df0f6d36ad785bbc
aoi_ocr/Ocr.py
@@ -5,7 +5,7 @@
import os
import pkg_resources
markerfile = '/template.png'  # always use slash
markerfile = '/template-sq.png'  # always use slash
markerfilename = pkg_resources.resource_filename(__name__, markerfile)
@@ -41,6 +41,7 @@
            return
        self.decodeQRandRotate()
        self.imgTreshold()
        cv2.imwrite('/tmp/debug_threshold.png', self.bwimg)
        skewAngle = 0
        #       try:
        #          skewAngle=self.getSkewAngle()
@@ -96,10 +97,10 @@
        self.imgHeight, self.imgWidth = self.img.shape[0:2]
        # todo, make better tresholding
    def imgTreshold(self):
        (self.thresh, self.bwimg) = cv2.threshold(
            self.img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU
            self.img, 128, 255,
            cv2.THRESH_BINARY | cv2.THRESH_OTSU
        )
    def getSkewAngle(self):
@@ -135,7 +136,7 @@
    def locateUpMarkers(self, threshold=0.85, height=200):
        template = cv2.imread(markerfilename, 0)
        w, h = template.shape[::-1]
        crop_img = self.img[0:height, :]
        crop_img = self.bwimg[0:height, :]
        res = cv2.matchTemplate(crop_img, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        cimg = cv2.cvtColor(crop_img, cv2.COLOR_GRAY2BGR)
@@ -172,7 +173,8 @@
    def locateRightMarkers(self, threshold=0.85, width=200):
        template = cv2.imread(markerfilename, 0)
        w, h = template.shape[::-1]
        crop_img = self.img[:, -width:]
        crop_img = self.bwimg[:, -width:]
        cv2.imwrite('/tmp/debug_right.png', crop_img)
        res = cv2.matchTemplate(crop_img, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        cimg = cv2.cvtColor(crop_img, cv2.COLOR_GRAY2BGR)
@@ -188,9 +190,13 @@
                    loc_filtered_y.append(pt[1])
                    loc_filtered_x.append(pt[0])
                    # order by y coordinate
            loc_filtered_y, loc_filtered_x = zip(
                *sorted(zip(loc_filtered_y, loc_filtered_x))
            )
            try:
                loc_filtered_y, loc_filtered_x = zip(
                    *sorted(zip(loc_filtered_y, loc_filtered_x))
                )
            except:
                self.yMarkerLocations=[np.array([1,1]),np.array([1,2])]
                return self.yMarkerLocations
            # loc=[loc_filtered_y,loc_filtered_x]
            # remove duplicates
            a = np.diff(loc_filtered_y) > 40
@@ -237,7 +243,7 @@
        es, err, warn = getSID(
            self.img[
                int(0.04 * self.imgHeight) : int(0.095 * self.imgHeight),
                int(0.7 * self.imgWidth) : int(0.99 * self.imgWidth),
                int(0.65 * self.imgWidth) : int(0.95 * self.imgWidth),
            ],
            self.sid_classifier,
            sid_mask,
@@ -270,9 +276,10 @@
            data = qrdata.split(",")
            retval = {
                "exam_id": int(data[1]),
                "page_no": int(data[3])+1,
                "page_no": int(data[3]),
                "paper_id": int(data[2]),
                "faculty_id": int(data[0]),
                "sid": None
            }
            if len(data) > 4:
                retval["sid"] = data[4]
@@ -285,11 +292,11 @@
        data["errors"] = self.errors
        data["warnings"] = self.warnings
        data["up_position"] = (
            list(self.xMarkerLocations[1] / self.imgWidth),
            list(self.yMarkerLocations[1] / self.imgHeight),
            list(self.xMarkerLocations[0] / self.imgWidth),
            list(self.xMarkerLocations[1] / self.imgHeight),
        )
        data["right_position"] = (
            list(self.xMarkerLocations[1] / self.imgWidth),
            list(self.yMarkerLocations[0] / self.imgWidth),
            list(self.yMarkerLocations[1] / self.imgHeight),
        )
        data["ans_matrix"] = (