Development of the OCR part of AOI
Samo Penic
2019-01-24 8dad6520729d26d90ebee48939d7f2a1fd73dd38
aoi_ocr/Ocr.py
@@ -5,7 +5,7 @@
import os
import pkg_resources
markerfile = '/template.png'  # always use slash
markerfile = '/template-sq.png'  # always use slash
markerfilename = pkg_resources.resource_filename(__name__, markerfile)
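
Note: the marker template switches to the square variant template-sq.png; pkg_resources.resource_filename turns that packaged file into a real filesystem path. A minimal sketch of the same lookup, with the package name spelled out instead of __name__ (the resulting path depends on where the package is installed):

import pkg_resources

# Resolve a data file shipped inside the installed package to an on-disk path.
# 'aoi_ocr' and 'template-sq.png' are taken from the diff above.
marker_path = pkg_resources.resource_filename('aoi_ocr', 'template-sq.png')
print(marker_path)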
@@ -41,6 +41,7 @@
            return
        self.decodeQRandRotate()
        self.imgTreshold()
        cv2.imwrite('/tmp/debug_threshold.png', self.bwimg)
        skewAngle = 0
        #       try:
        #          skewAngle=self.getSkewAngle()
@@ -96,10 +97,10 @@
        self.imgHeight, self.imgWidth = self.img.shape[0:2]
        # todo, make better tresholding
    def imgTreshold(self):
        (self.thresh, self.bwimg) = cv2.threshold(
            self.img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU
            self.img, 128, 255,
            cv2.THRESH_BINARY | cv2.THRESH_OTSU
        )
    def getSkewAngle(self):
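
Note: imgTreshold binarizes the page with Otsu's method (cv2.THRESH_BINARY | cv2.THRESH_OTSU), so the 128 passed in is only a placeholder and the actual threshold is picked from the image histogram. A standalone sketch of that call on a synthetic grayscale page:

import cv2
import numpy as np

# Synthetic grayscale page: light background with a darker block of "ink".
img = np.full((200, 200), 220, dtype=np.uint8)
img[60:140, 60:140] = 40

# With THRESH_OTSU the second argument (128) is ignored; the chosen threshold
# is computed from the histogram and returned as `thresh`.
thresh, bw = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
print(thresh)               # Otsu-chosen threshold, somewhere between 40 and 220
print(bw.dtype, bw.max())   # uint8 binary image with values 0 and 255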
@@ -135,7 +136,7 @@
    def locateUpMarkers(self, threshold=0.85, height=200):
        template = cv2.imread(markerfilename, 0)
        w, h = template.shape[::-1]
        crop_img = self.img[0:height, :]
        crop_img = self.bwimg[0:height, :]
        res = cv2.matchTemplate(crop_img, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        cimg = cv2.cvtColor(crop_img, cv2.COLOR_GRAY2BGR)
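
Note: locateUpMarkers now matches the template against the binarized image (self.bwimg) rather than the raw grayscale crop. A minimal sketch of the matchTemplate / np.where pattern used here, with a synthetic strip and template (all sizes and positions are made up):

import cv2
import numpy as np

# Synthetic strip: white background with two filled square markers.
strip = np.full((200, 800), 255, dtype=np.uint8)
strip[85:115, 105:135] = 0
strip[85:115, 605:635] = 0

# Template: the same 30x30 black square with a 5 px white margin (40x40 total).
template = np.full((40, 40), 255, dtype=np.uint8)
template[5:35, 5:35] = 0

res = cv2.matchTemplate(strip, template, cv2.TM_CCOEFF_NORMED)
ys, xs = np.where(res >= 0.85)   # every location scoring above the threshold
print(np.unique(xs))             # hits cluster around x=100 and x=600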
@@ -172,7 +173,8 @@
    def locateRightMarkers(self, threshold=0.85, width=200):
        template = cv2.imread(markerfilename, 0)
        w, h = template.shape[::-1]
        crop_img = self.img[:, -width:]
        crop_img = self.bwimg[:, -width:]
        cv2.imwrite('/tmp/debug_right.png', crop_img)
        res = cv2.matchTemplate(crop_img, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        cimg = cv2.cvtColor(crop_img, cv2.COLOR_GRAY2BGR)
@@ -188,9 +190,13 @@
                    loc_filtered_y.append(pt[1])
                    loc_filtered_x.append(pt[0])
                    # order by y coordinate
            loc_filtered_y, loc_filtered_x = zip(
                *sorted(zip(loc_filtered_y, loc_filtered_x))
            )
            try:
                loc_filtered_y, loc_filtered_x = zip(
                    *sorted(zip(loc_filtered_y, loc_filtered_x))
                )
            except:
                self.yMarkerLocations=[np.array([1,1]),np.array([1,2])]
                return self.yMarkerLocations
            # loc=[loc_filtered_y,loc_filtered_x]
            # remove duplicates
            a = np.diff(loc_filtered_y) > 40
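
Note: the marker hits are ordered by their y coordinate (the zip/sorted/zip idiom, now wrapped in a try/except so an empty detection list falls back to dummy marker locations) and then thinned with np.diff so that hits less than 40 px apart count as one marker. The hunk ends before the mask is applied; the sketch below shows one way it can be used, with made-up coordinates:

import numpy as np

# Hypothetical raw hits (y, x) from np.where; several rows detect the same marker.
ys = [514, 512, 760, 1012, 1010]
xs = [31, 30, 29, 32, 30]

ys, xs = zip(*sorted(zip(ys, xs)))            # order detections top-to-bottom
keep = np.insert(np.diff(ys) > 40, 0, True)   # drop hits closer than 40 px to the previous one
ys = np.array(ys)[keep]
xs = np.array(xs)[keep]
print(ys)   # three distinct markers remain: 512, 760, 1010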
@@ -210,12 +216,12 @@
        self.locateUpMarkers()
        self.locateRightMarkers()
        roixoff = 10
        roiyoff = 5
        roiwidth = 50
        roixoff = 4
        roiyoff = 0
        roiwidth = 55
        roiheight = roiwidth
        totpx = roiwidth * roiheight
        cimg = cv2.cvtColor(self.img, cv2.COLOR_GRAY2BGR)
        self.answerMatrix = []
        for y in self.yMarkerLocations[0]:
            oneline = []
@@ -227,6 +233,8 @@
                # cv2.imwrite('ans_x'+str(x)+'_y_'+str(y)+'.png',roi)
                black = totpx - cv2.countNonZero(roi)
                oneline.append(black / totpx)
                cv2.rectangle(cimg, (x - roixoff,y - roiyoff), (x + int(roiwidth - roixoff),y + int(roiheight - roiyoff)), (0, 255, 255), 2)
            cv2.imwrite('/tmp/debug_answers.png',cimg)
            self.answerMatrix.append(oneline)
    def get_enhanced_sid(self):
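
Note: each answer cell is scored as the fraction of black pixels inside a roiwidth x roiheight window (now 55 px with tighter offsets), and the inspected rectangles are dumped to /tmp/debug_answers.png. A standalone sketch of the fill-ratio computation on one synthetic cell:

import cv2
import numpy as np

# Synthetic 55x55 binarized cell: white background with a filled pencil mark.
roi = np.full((55, 55), 255, dtype=np.uint8)
cv2.circle(roi, (27, 27), 15, 0, -1)    # draw a filled black blob

totpx = roi.shape[0] * roi.shape[1]
black = totpx - cv2.countNonZero(roi)   # countNonZero counts white (255) pixels
print(black / totpx)                    # fill ratio, roughly a quarter of the cell here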
@@ -237,7 +245,7 @@
        es, err, warn = getSID(
            self.img[
                int(0.04 * self.imgHeight) : int(0.095 * self.imgHeight),
                int(0.7 * self.imgWidth) : int(0.99 * self.imgWidth),
                int(0.65 * self.imgWidth) : int(0.95 * self.imgWidth),
            ],
            self.sid_classifier,
            sid_mask,
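
Note: get_enhanced_sid crops the student-ID strip using fractions of the page size rather than fixed pixels; the window moves from 0.7-0.99 to 0.65-0.95 of the width. For a 300 dpi A4 scan those fractions translate roughly as follows (the image size below is illustrative):

import numpy as np

img = np.zeros((3508, 2480), dtype=np.uint8)   # stand-in for a 300 dpi A4 scan
h, w = img.shape

# Same relative window as the new code: 4%..9.5% of the height, 65%..95% of the width.
sid_region = img[int(0.04 * h):int(0.095 * h), int(0.65 * w):int(0.95 * w)]
print(sid_region.shape)                        # about 193 x 744 px for this page size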
@@ -270,9 +278,10 @@
            data = qrdata.split(",")
            retval = {
                "exam_id": int(data[1]),
                "page_no": int(data[3])+1,
                "page_no": int(data[3]),
                "paper_id": int(data[2]),
                "faculty_id": int(data[0]),
                "sid": None
            }
            if len(data) > 4:
                retval["sid"] = data[4]
@@ -281,15 +290,17 @@
    def get_paper_ocr_data(self):
        data = self.get_code_data()
        if self.QRData is None:
            return None
        data["qr"] = bytes.decode(self.QRData, 'utf8')
        data["errors"] = self.errors
        data["warnings"] = self.warnings
        data["up_position"] = (
            list(self.xMarkerLocations[1] / self.imgWidth),
            list(self.yMarkerLocations[1] / self.imgHeight),
            list(self.xMarkerLocations[0] / self.imgWidth),
            list(self.xMarkerLocations[1] / self.imgHeight),
        )
        data["right_position"] = (
            list(self.xMarkerLocations[1] / self.imgWidth),
            list(self.yMarkerLocations[0] / self.imgWidth),
            list(self.yMarkerLocations[1] / self.imgHeight),
        )
        data["ans_matrix"] = (
@@ -300,4 +311,5 @@
        output_filename=os.path.join(self.output_path, '.'.join(self.filename.split('/')[-1].split('.')[:-1])+".png")
        cv2.imwrite(output_filename, self.img)
        data['output_filename']=output_filename
        print(np.array(self.answerMatrix))
        return data
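
Note: the processed page is written under output_path, reusing the scan's base name with a .png extension, and the answer matrix is printed for debugging. A sketch of how that output name is derived (paths are made up):

import os

filename = "/scans/exam42/page_0007.jpg"   # hypothetical input scan
output_path = "/tmp/aoi_output"            # hypothetical output directory

# Keep only the base name, drop the original extension, append .png.
stem = '.'.join(filename.split('/')[-1].split('.')[:-1])
output_filename = os.path.join(output_path, stem + ".png")
print(output_filename)                     # /tmp/aoi_output/page_0007.png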