Development of the OCR part of AOI
Samo Penic
2018-11-21 d88ce4da04499fd9f48d7a21a7ecded8535e9ab2
aoi_ocr/Ocr.py
@@ -2,17 +2,18 @@
from .sid_process import getSID
import cv2
import numpy as np
import os
import pkg_resources
markerfile = '/template.png'  # always use slash
markerfile = '/template-sq.png'  # always use a forward slash: pkg_resources resource names are '/'-separated on every OS
markerfilename = pkg_resources.resource_filename(__name__, markerfile)
class Paper:
    def __init__(self, filename=None, sid_classifier=None, settings=None):
    def __init__(self, filename=None, sid_classifier=None, settings=None, output_path="/tmp"):
        self.filename = filename
        self.output_path = output_path
        self.invalid = None
        self.QRData = None
        self.settings = {"answer_threshold": 0.25} if settings is None else settings
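
For orientation, a minimal usage sketch of the new constructor argument. The scan path is made up, and it assumes the scan has been processed by the time get_paper_ocr_data() (defined further down in this file) is called; the marker template itself is resolved with pkg_resources.resource_filename relative to the installed package.

    from aoi_ocr.Ocr import Paper

    # "scan_0001.png" is a hypothetical input; sid_classifier stays None here
    paper = Paper(filename="scan_0001.png", output_path="/tmp")
    result = paper.get_paper_ocr_data()
    print(result["exam_id"], result["page_no"], result["output_filename"])
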
@@ -40,6 +41,7 @@
            return
        self.decodeQRandRotate()
        self.imgTreshold()
        cv2.imwrite('/tmp/debug_threshold.png', self.bwimg)  # debug dump of the thresholded page
        skewAngle = 0
        #       try:
        #          skewAngle=self.getSkewAngle()
@@ -95,7 +97,6 @@
        self.imgHeight, self.imgWidth = self.img.shape[0:2]
        # todo, make better thresholding
    def imgTreshold(self):
        (self.thresh, self.bwimg) = cv2.threshold(
            self.img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU
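
A standalone sketch of what the call above does: with cv2.THRESH_OTSU set, OpenCV picks the threshold from the image histogram and the fixed 128 is ignored. The input path is a placeholder.

    import cv2

    img = cv2.imread("scan_0001.png", cv2.IMREAD_GRAYSCALE)  # placeholder path
    otsu_t, bw = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    print("Otsu picked threshold", otsu_t)  # the 128 argument is ignored when THRESH_OTSU is set
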
@@ -134,7 +135,7 @@
    def locateUpMarkers(self, threshold=0.85, height=200):
        template = cv2.imread(markerfilename, 0)
        w, h = template.shape[::-1]
        crop_img = self.img[0:height, :]
        crop_img = self.bwimg[0:height, :]
        res = cv2.matchTemplate(crop_img, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        cimg = cv2.cvtColor(crop_img, cv2.COLOR_GRAY2BGR)
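
The marker search is plain normalized cross-correlation template matching. A self-contained sketch of the same pattern, with placeholder file names; note that np.where returns (rows, cols), i.e. y before x.

    import cv2
    import numpy as np

    page = cv2.imread("page.png", cv2.IMREAD_GRAYSCALE)        # placeholder scan
    template = cv2.imread("template-sq.png", cv2.IMREAD_GRAYSCALE)
    strip = page[0:200, :]                                      # top strip, as in locateUpMarkers
    res = cv2.matchTemplate(strip, template, cv2.TM_CCOEFF_NORMED)
    ys, xs = np.where(res >= 0.85)                              # top-left corners of candidate hits
    for x, y in zip(xs, ys):
        print("marker candidate at", x, y)
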
@@ -171,7 +172,8 @@
    def locateRightMarkers(self, threshold=0.85, width=200):
        template = cv2.imread(markerfilename, 0)
        w, h = template.shape[::-1]
        crop_img = self.img[:, -width:]
        crop_img = self.bwimg[:, -width:]
        cv2.imwrite('/tmp/debug_right.png', crop_img)  # debug dump of the right-margin crop
        res = cv2.matchTemplate(crop_img, template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        cimg = cv2.cvtColor(crop_img, cv2.COLOR_GRAY2BGR)
@@ -187,9 +189,13 @@
                    loc_filtered_y.append(pt[1])
                    loc_filtered_x.append(pt[0])
                    # order by y coordinate
            loc_filtered_y, loc_filtered_x = zip(
                *sorted(zip(loc_filtered_y, loc_filtered_x))
            )
            try:
                loc_filtered_y, loc_filtered_x = zip(
                    *sorted(zip(loc_filtered_y, loc_filtered_x))
                )
            except ValueError:
                # no marker matched the template; fall back to dummy locations
                self.yMarkerLocations = [np.array([1, 1]), np.array([1, 2])]
                return self.yMarkerLocations
            # loc=[loc_filtered_y,loc_filtered_x]
            # remove duplicates
            a = np.diff(loc_filtered_y) > 40
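
The duplicate removal relies on the hits being sorted by y: np.diff flags gaps larger than 40 px as the start of a new marker. A small worked example with made-up coordinates; how the mask is applied afterwards is outside this hunk.

    import numpy as np

    ys = np.array([102, 103, 350, 351, 352, 598])      # sorted y coordinates of template hits
    keep = np.concatenate(([True], np.diff(ys) > 40))  # keep the first hit of each 40 px cluster
    print(ys[keep].tolist())                           # [102, 350, 598]
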
@@ -260,7 +266,7 @@
        if self.QRDecode[0].type == "EAN13":
            return {
                "exam_id": int(qrdata[0:7]),
                "page_no": int(qrdata[7]),
                "page_no": int(qrdata[7])+1,
                "paper_id": int(qrdata[-5:-1]),
                "faculty_id": None,
                "sid": None,
@@ -269,9 +275,10 @@
            data = qrdata.split(",")
            retval = {
                "exam_id": int(data[1]),
                "page_no": int(data[3]),
                "page_no": int(data[3])+1,
                "paper_id": int(data[2]),
                "faculty_id": int(data[0]),
                "sid": None
            }
            if len(data) > 4:
                retval["sid"] = data[4]
@@ -280,7 +287,7 @@
    def get_paper_ocr_data(self):
        data = self.get_code_data()
        data["qr"] = self.QRData
        data["qr"] = bytes.decode(self.QRData, 'utf8')
        data["errors"] = self.errors
        data["warnings"] = self.warnings
        data["up_position"] = (
@@ -294,6 +301,9 @@
        data["ans_matrix"] = (
            (np.array(self.answerMatrix) > self.settings["answer_threshold"]) * 1
        ).tolist()
        if data["sid"] is None and data["page_no"] == 0:
        if data["sid"] is None and data["page_no"] == 2:
            data["sid"] = self.get_enhanced_sid()
        # processed page image is saved under output_path with the input basename and a .png extension
        output_filename = os.path.join(
            self.output_path, '.'.join(self.filename.split('/')[-1].split('.')[:-1]) + ".png"
        )
        cv2.imwrite(output_filename, self.img)
        data['output_filename'] = output_filename
        return data
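
Finally, two small sketches of the last hunk: how the boolean answer matrix falls out of the per-box scores, and what the output filename construction produces. The scores and the scan path are made up.

    import os
    import numpy as np

    # per-box fill scores; boxes above settings["answer_threshold"] (0.25 by default) count as marked
    answer_matrix = [[0.02, 0.41, 0.03], [0.30, 0.01, 0.05]]
    print(((np.array(answer_matrix) > 0.25) * 1).tolist())  # [[0, 1, 0], [1, 0, 0]]

    # input basename with its extension swapped for .png, placed under output_path
    filename = "scans/exam-042.jpg"  # hypothetical input path
    base = '.'.join(filename.split('/')[-1].split('.')[:-1]) + ".png"
    print(os.path.join("/tmp", base))  # /tmp/exam-042.png
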